Commit · 63004e7
1 Parent(s): e63c2c0
Update parquet files (step 109 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Call Of Duty Black Ops II [UPD] Crack Only-SKIDROW Torrent.md +0 -25
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (welcome 2007 hindi movie 720p torren) - Enjoy the best quality of the Indian blockbuster Welcome.md +0 -119
- spaces/1gistliPinn/ChatGPT4/Examples/Cherish Model 11.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/FIFA-14-crack [NEW]-V6-FINAL-3DM-exe.md +0 -9
- spaces/1toTree/lora_test/ppdiffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py +0 -253
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r18.py +0 -26
- spaces/AI-Hobbyist/Hoyo-RVC/gui.py +0 -698
- spaces/AI-Hobbyist/Hoyo-RVC/slicer2.py +0 -260
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/data_gen_utils.py +0 -357
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/params_model.py +0 -11
- spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/.ipynb_checkpoints/yolov6_s_fast-checkpoint.py +0 -124
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Better.py +0 -57
- spaces/Alcedo/yunmedia/resources/chatgpt-plugin/live2d/live2dcubismcore.min.js +0 -0
- spaces/AlexWang/lama/saicinpainting/evaluation/losses/lpips.py +0 -891
- spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/unittest.py +0 -29
- spaces/Ameaou/academic-chatgpt3.1/main.py +0 -190
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/helpers.py +0 -119
- spaces/AndrewRWilliams/video-whisper/app.py +0 -82
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/README.md +0 -228
- spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/nasfcos_head.py +0 -75
- spaces/Andy1621/uniformer_image_detection/mmdet/utils/contextmanagers.py +0 -121
- spaces/Andy1621/uniformer_image_segmentation/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py +0 -5
- spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/clipboard/clipboard.min.js +0 -7
- spaces/Ankita0512ghosh/Weather_bot/app.py +0 -83
- spaces/Aphrodite/stable-diffusion-2/app.py +0 -154
- spaces/AquaSuisei/ChatGPTXE/modules/utils.py +0 -536
- spaces/Armandoliv/cars-parts-segmentation-resnet18/README.md +0 -12
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/slconfig.py +0 -427
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/utils.py +0 -610
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/common.py +0 -424
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py +0 -14
- spaces/BartPoint/VoiceChange_Beta/util.py +0 -81
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py +0 -165
- spaces/BisratWorku/Bear_classifier/README.md +0 -13
- spaces/BlueRey/MendoBERT_QA/app.py +0 -40
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/events.py +0 -385
- spaces/CVPR/LIVE/thrust/thrust/system/cpp/memory_resource.h +0 -62
- spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/sort.h +0 -34
- spaces/CVPR/LIVE/thrust/thrust/type_traits/remove_cvref.h +0 -48
- spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/handler.js +0 -73
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/_version_info.py +0 -86
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/__init__.py +0 -49
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-75764f1c.js +0 -2
- spaces/DamarJati/DamarJati-NSFW-filter-DecentScan/app.py +0 -11
- spaces/Datasculptor/StyleGAN-NADA/e4e/models/__init__.py +0 -0
- spaces/Datasculptor/car-data/app.py +0 -73
- spaces/Duskfallcrew/darkstorm2150-Protogen_x5.8_Official_Release/app.py +0 -3
- spaces/ECCV2022/bytetrack/tutorials/centertrack/opts.py +0 -406
- spaces/ECCV2022/bytetrack/tutorials/cstrack/tracker.py +0 -542
- spaces/EPFL-VILAB/MultiMAE/utils/cross_entropy.py +0 -43
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Call Of Duty Black Ops II [UPD] Crack Only-SKIDROW Torrent.md
DELETED
@@ -1,25 +0,0 @@
<br />
<h1>How to Download and Install Call of Duty Black Ops II Crack Only-SKIDROW Torrent</h1>
<p>Call of Duty Black Ops II is a first-person shooter video game developed by Treyarch and published by Activision. It is the ninth game in the Call of Duty series and a sequel to the 2010 game Call of Duty: Black Ops. The game was released worldwide on November 13, 2012 for Microsoft Windows, PlayStation 3, Xbox 360, and Wii U.</p>
<p>If you want to play the game without buying it, you can download and install a crack file that bypasses the game's protection and allows you to run it without a valid license. One of the most popular crack files for Call of Duty Black Ops II is the one released by SKIDROW, a group of hackers who specialize in cracking video games. In this article, we will show you how to download and install Call of Duty Black Ops II Crack Only-SKIDROW torrent using a torrent client.</p>
<h2>Call of Duty Black Ops II Crack Only-SKIDROW torrent</h2><br /><p><b><b>Download File</b> > <a href="https://byltly.com/2uKzea">https://byltly.com/2uKzea</a></b></p><br /><br />
<h2>Step 1: Download a torrent client</h2>
<p>A torrent client is a software that enables you to download files from other users who are sharing them on a peer-to-peer network. There are many torrent clients available online, such as uTorrent, BitTorrent, qBittorrent, etc. You can choose any one that suits your preferences and system requirements. Download and install the torrent client on your computer.</p>
<h2>Step 2: Download Call of Duty Black Ops II Crack Only-SKIDROW torrent</h2>
<p>Once you have installed the torrent client, you need to find and download the Call of Duty Black Ops II Crack Only-SKIDROW torrent file. A torrent file is a small file that contains information about the files you want to download, such as their names, sizes, locations, etc. You can find the Call of Duty Black Ops II Crack Only-SKIDROW torrent file on various websites that host torrents, such as LimeTorrents.to[^2^], MegaGames.com[^1^], Archive.org[^4^], etc. You can also use a search engine like Google or Bing to look for the torrent file.</p>
<p>Once you have found the torrent file, click on it to open it with your torrent client. The torrent client will start downloading the crack file from other users who are sharing it. The download speed may vary depending on your internet connection and the number of seeders (users who have the complete file) and leechers (users who are downloading the file) available. Wait until the download is complete.</p>
<h2>Step 3: Install Call of Duty Black Ops II Crack Only-SKIDROW</h2>
<p>After downloading the crack file, you need to install it on your computer. The crack file is usually compressed in a ZIP or RAR archive that you need to extract first using a software like WinRAR or 7-Zip. After extracting the archive, you will find a folder named SKIDROW that contains several files, such as Call.of.Duty.Black.Ops.II.Update.1.and.2.exe, SKIDROW.ini, steam_api.dll, etc.</p>
<p>To install the crack file, follow these steps:</p>
<ol>
<li>Run Call.of.Duty.Black.Ops.II.Update.1.and.2.exe and follow the instructions to update your game to the latest version.</li>
<li>Copy all the files from the SKIDROW folder to the main installation folder of Call of Duty Black Ops II and overwrite any existing files.</li>
<li>Block the game in your firewall and mark the cracked files as secure/trusted in your antivirus program to prevent them from being deleted or blocked.</li>
<li>Play the game by launching it from its executable file or from a shortcut on your desktop.</li>
<li>Support the developers by buying the game if you enjoy it!</li>
</ol>
<h2>Conclusion</h2>
<p>In this article, we have shown you how to download and install Call of Duty Black Ops II Crack Only-SKIDROW torrent using a torrent client. This method allows you to play the game without purchasing it, but it may also expose you to some risks, such as viruses, malware, legal issues, etc. Therefore, we do not condone or encourage piracy and we advise you to use this method</p>
<p></p> 81aa517590<br />
<br />
<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (welcome 2007 hindi movie 720p torren) - Enjoy the best quality of the Indian blockbuster Welcome.md
DELETED
@@ -1,119 +0,0 @@

<h1>HD Online Player (Welcome 2007 Hindi Movie 720p Torrent)</h1>
<p>If you are a fan of Bollywood comedy movies, you might have heard of Welcome, a 2007 film starring Akshay Kumar, Katrina Kaif, Anil Kapoor, Nana Patekar, Paresh Rawal and others. The movie is about a series of hilarious events that happen when a good-hearted gangster tries to find a suitable groom for his sister, who falls in love with a naive and innocent man. The movie was a huge hit at the box office and received positive reviews from critics and audiences alike.</p>
<h2>HD Online Player (welcome 2007 hindi movie 720p torren)</h2><br /><p><b><b>Download File</b> ○○○ <a href="https://byltly.com/2uKzwW">https://byltly.com/2uKzwW</a></b></p><br /><br />
<p>But what if you want to watch Welcome online in high definition (HD) quality? You might be wondering how to find a reliable and safe source to stream or download the movie in 720p resolution. Well, look no further, because we have got you covered. In this article, we will tell you how to use an HD online player to watch Welcome 2007 Hindi movie 720p torrent without any hassle.</p>
<h2>What is an HD online player?</h2>
<p>An HD online player is a software or web application that allows you to play video files from various sources, such as torrents, direct links, cloud storage, etc. An HD online player can also convert the video format and resolution according to your device and internet speed. Some of the benefits of using an HD online player are:</p>
<ul>
<li>You can watch videos in HD quality without downloading them to your device.</li>
<li>You can save your storage space and bandwidth by streaming videos online.</li>
<li>You can access a large collection of movies and shows from different genres and languages.</li>
<li>You can enjoy a smooth and uninterrupted viewing experience with fast buffering and loading.</li>
<li>You can adjust the playback speed, volume, subtitles, etc. according to your preference.</li>
</ul>
<h2>How to use an HD online player to watch Welcome 2007 Hindi movie 720p torrent?</h2>
<p>There are many HD online players available on the internet, but not all of them are trustworthy and secure. Some of them may contain malware, viruses, ads, or spyware that can harm your device or compromise your privacy. Therefore, you need to be careful while choosing an HD online player to watch Welcome 2007 Hindi movie 720p torrent.</p>
<p>Welcome 2007 Hindi Movie 720p Download Free<br />
Welcome Full Movie Download Torrent<br />
Welcome Hd 720p Trailer Download<br />
Welcome Movie 720p Full HD<br />
Welcome Movie Download 720p – Home – PCMovie<br />
Welcome Movie Download 720p Video<br />
Welcome Movie Download 720p – Watch movies online for free<br />
Welcome Movie Download Movie 720p [Anees Bazmee]<br />
Welcome Download Movie Full Movie for Free in Hindi 720p<br />
Welcome Download site: www.thepiratebay.in<br />
Welcome 2007 Hindi HDRip 720p<br />
Welcome to the World of Free Films<br />
Welcome Free Movies Subtitles<br />
Welcome Download w/Torrent<br />
Welcome Play HD Online Player<br />
Welcome Stream on SoundCloud<br />
Welcome Audiobooks and Excerpts<br />
Welcome Genre: Drama, Action, Romance, Comedy<br />
Welcome IMDB Rating: 6.8/10<br />
Welcome Director: Anees Bazmee<br />
Welcome Cast: Akshay Kumar, Katrina Kaif, Nana Patekar<br />
Welcome Runtime: 2h 29mn<br />
Welcome Source: 1080p.AMZN.WEBDL.DDP5.1.H.264-Telly<br />
Welcome Video: AVC | 1280x544 1050 Kbps<br />
Welcome Audio: 2CH AAC Hindi<br />
Welcome Subtitles: English Softcoded<br />
Welcome Chapter: -<br />
Welcome Subscene Link: Indonesian, English<br />
Welcome Screenshot: View<br />
Welcome Trailer: Watch<br />
Welcome File: .mkv<br />
Welcome Size: 550 MB - 2.57 GB<br />
Welcome Quality: WEB-HD 480p, 720p & 1080p – Pahe.in<br />
Welcome Release Date: December 28, 2021<br />
Welcome Plot: A man falls in love with a beautiful woman, but later discovers that her brothers are gangsters.<br />
Welcome Comedy, Crime, Drama<br />
Welcome Net Energy Gain in Nuclear Fusion Experiment<br />
Welcome Korea Superconducting Tokamak Advanced Research Experiment<br />
Welcome Temperature of 100 Million°C for 30 Seconds<br />
Welcome Seven Times Hotter than the Core of the Sun<br />
Welcome Physics Problem to Engineering One<br />
Welcome Korea Institute of Fusion Energy<br />
Welcome New Scientist Article on Fusion Breakthrough<br />
Welcome The Sun News on Fusion Experiments<br />
Welcome Yahoo News on Fusion Reactor<br />
Welcome Wikipedia on Sun and Solar Core Temperature<br />
Welcome Montana Solar Physics on Sun's Core and Radiative Zone Temperature <br />
Welcome Cornell Astronomy on Sun's Layers Temperature <br />
Welcome NASA Fact Sheet on Sun and Solar Atmosphere Temperature <br />
Welcome Wikipedia on Solar Core Density and Composition</p>
<p>One of the best and most popular HD online players is Yify. Yify is a website that provides high-quality torrents of movies and shows in various resolutions, such as 720p, 1080p, and 4K. Yify also has an online player that lets you stream the torrents directly on your browser without downloading them. Yify is known for its fast speed, user-friendly interface, and minimal ads.</p>
<p>To use Yify's HD online player to watch Welcome 2007 Hindi movie 720p torrent, follow these simple steps:</p>
<ol>
<li>Go to Yify's official website: https://yts.mx/</li>
<li>Search for Welcome 2007 Hindi movie in the search bar or browse through the categories.</li>
<li>Select the movie from the results and click on it.</li>
<li>Choose the 720p torrent option and click on the play button next to it.</li>
<li>A new tab will open with the Yify online player. Wait for a few seconds for the video to load and buffer.</li>
<li>Enjoy watching Welcome 2007 Hindi movie 720p torrent in HD quality on your device.</li>
</ol>
<h2>What are some alternatives to Yify's HD online player?</h2>
<p>If you are not satisfied with Yify's HD online player or want to try some other options, here are some alternatives that you can use to watch Welcome 2007 Hindi movie 720p torrent:</p>
<ul>
<li><strong>SoundCloud:</strong> SoundCloud is a music streaming platform that also hosts some video files uploaded by users. You can find Welcome 2007 Hindi movie 720p torrent on SoundCloud by searching for it or following this link: https://soundcloud.com/eskitwirsont/welcome-2007-hindi-movie-720p-torrent</li>
<li><strong>Boatripz:</strong> Boatripz is a website that offers free online video conversion and downloading services. You can use Boatripz to convert Welcome 2007 Hindi movie 720p torrent into mp4 format and download it to your device or watch it online. To use Boatripz, go to this link: https://boatripz.com/wp-content/uploads/2022/12/noehen.pdf</li>
<li><strong>Eecoeats:</strong> Eecoeats is another website that provides free online video conversion and downloading services. You can also use Eecoeats to convert Welcome 2007 Hindi movie 720p torrent into mp4 format and download it to your device or watch it online. To use Eecoeats, go to this link: https://www.eecoeats.com/wp-content/uploads/2022/07/HD_Online_Player_welcome_2007_hindi_movie_720p_torren.pdf</li>
</ul>
<h2>Conclusion</h2>
<p>Welcome 2007 Hindi movie is a comedy masterpiece that you should not miss if you love Bollywood movies. You can watch it online in HD quality using an HD online player like Yify or any of its alternatives. We hope this article helped you find the best way to watch Welcome 2007 Hindi movie 720p torrent on your device.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about watching Welcome 2007 Hindi movie 720p torrent using an HD online player:</p>
<h3>Q: Is it legal to watch Welcome 2007 Hindi movie 720p torrent using an HD online player?</h3>
<p>A: It depends on your country's laws and regulations regarding piracy and copyright infringement. Some countries may allow you to watch Welcome 2007 Hindi movie 720p torrent using an HD online player for personal use only, while others may prohibit it completely. Therefore, you should check your local laws before using an HD online player to watch Welcome 2007 Hindi movie 720p torrent.</p>
<h3>Q: Is it safe to watch Welcome 2007 Hindi movie 720p torrent using an HD online player?</h3>
<p>Some HD online players may be reliable and secure, while others may contain malware, viruses, ads, or spyware that can harm your device or compromise your privacy. Therefore, you should always use a trusted and reputable HD online player like Yify or any of its alternatives to watch Welcome 2007 Hindi movie 720p torrent.</p>
<h3>Q: What are the benefits of watching Welcome 2007 Hindi movie 720p torrent using an HD online player?</h3>
<p>A: Some of the benefits of watching Welcome 2007 Hindi movie 720p torrent using an HD online player are:</p>
<ul>
<li>You can watch videos in HD quality without downloading them to your device.</li>
<li>You can save your storage space and bandwidth by streaming videos online.</li>
<li>You can access a large collection of movies and shows from different genres and languages.</li>
<li>You can enjoy a smooth and uninterrupted viewing experience with fast buffering and loading.</li>
<li>You can adjust the playback speed, volume, subtitles, etc. according to your preference.</li>
</ul>
<h3>Q: What are the drawbacks of watching Welcome 2007 Hindi movie 720p torrent using an HD online player?</h3>
<p>A: Some of the drawbacks of watching Welcome 2007 Hindi movie 720p torrent using an HD online player are:</p>
<ul>
<li>You may encounter some ads or pop-ups that may interrupt your viewing experience or redirect you to unwanted websites.</li>
<li>You may face some issues with the video quality, audio sync, subtitles, etc. depending on your internet connection and device compatibility.</li>
<li>You may violate some laws or regulations regarding piracy and copyright infringement depending on your country's laws.</li>
</ul>
<h3>Q: How can I improve my viewing experience while watching Welcome 2007 Hindi movie 720p torrent using an HD online player?</h3>
<p>A: Here are some tips that can help you improve your viewing experience while watching Welcome 2007 Hindi movie 720p torrent using an HD online player:</p>
<ul>
<li>Use a stable and fast internet connection with enough bandwidth for streaming videos in HD quality.</li>
<li>Use a compatible device with a good screen resolution and sound system for watching videos in HD quality.</li>
<li>Use headphones or earphones for better audio quality and immersion.</li>
<li>Use ad blockers or VPNs to avoid ads or pop-ups that may interrupt your viewing experience or redirect you to unwanted websites.</li>
<li>Use browser extensions or plugins that can enhance your video playback options such as speed control, volume control, subtitle control, etc.</li>
</ul>
</p> 0a6ba089eb<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Cherish Model 11.md
DELETED
@@ -1,6 +0,0 @@

<p>the goal of the cherish consortium is to support and advance hiv-related research by identifying, recruiting, and providing funding for young investigators with a strong commitment to the study of hiv/aids. the consortium is a collaboration of academic, government, and community partners. the consortium has developed a cherish national steering committee and has been awarded funding to support the cherish pilot study. over the next two years, the consortium will evaluate and refine the cherish protocol, conduct a pilot study to test the efficacy of the cherish intervention on hiv-related clinical outcomes, and assess the feasibility and acceptability of the cherish intervention. consortium members are: susan cohan, m.d., assistant professor of medicine, division of infectious diseases, department of medicine, massachusetts general hospital; megan curtis, m., clinical fellow, infectious diseases division, boston medical center; j.t. kapsimalis, m., infectious diseases division, brigham and women's hospital; steve kowdley, m., associate professor of medicine, division of infectious diseases, department of medicine, massachusetts general hospital; douglas o'malley, m., professor of medicine, division of infectious diseases, department of medicine, harvard medical school; michael perzanowski, m., assistant professor of medicine, division of infectious diseases, department of medicine, harvard medical school; and dennis shusterman, m., assistant professor of medicine, division of infectious diseases, department of medicine, massachusetts general hospital.</p>
<h2>cherish model 11</h2><br /><p><b><b>Download Zip</b> ✵ <a href="https://imgfil.com/2uxXeX">https://imgfil.com/2uxXeX</a></b></p><br /><br />
<p>cherish is equipped with 4 different laboratories; a molecular lab, a molecular analysis lab, a molecular data management lab, and a molecular analysis lab. these 4 different labs do molecular assays for the following target viruses; hepatitis c, hepatitis b, hiv, and hsv. cherish i had a similar set of facilities.</p> 899543212b<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/FIFA-14-crack [NEW]-V6-FINAL-3DM-exe.md
DELETED
@@ -1,9 +0,0 @@
<h2>FIFA-14-CRACK-V6-FINAL-3DM-exe</h2><br /><p><b><b>Download Zip</b> 🌟 <a href="https://imgfil.com/2uxWXZ">https://imgfil.com/2uxWXZ</a></b></p><br /><br />
<br />
Cracked 3DM. this is not a torrent and it works fine on PC.net Filehoster: ... Estimated reading time: 2 minutes Nov 21, 2013 Issue 16: FIFA 14 not working in ... Download FIFA 14 game via torrent, you can free on our site at high speed.
Download FIFA 14 (2013) PC - torrent.
FIFA 14 (2013) PC Release year: 2013 Genre: Simulator, Sport (Soccer) Developer: EA Canada Publisher: Electronic Arts Publication type: License [Steam-Rip] Game version: 1.0.3 Platform: PC Interface language: Russian, English Voice language: Russian, English Medicine: Present (RELOADED)
Download FIFA 14 (2013) torrent for free here. 8a78ff9644<br />
<br />
<br />
<p></p>
spaces/1toTree/lora_test/ppdiffusers/pipelines/audio_diffusion/pipeline_audio_diffusion.py
DELETED
@@ -1,253 +0,0 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import paddle
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import (
    AudioPipelineOutput,
    BaseOutput,
    DiffusionPipeline,
    ImagePipelineOutput,
)
from ...schedulers import DDIMScheduler, DDPMScheduler
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        vqvae ([`AutoencoderKL`]): Variational AutoEncoder for Latent Audio Diffusion or None
        unet ([`UNet2DConditionModel`]): UNET model
        mel ([`Mel`]): transform audio <-> spectrogram
        scheduler ([`DDIMScheduler` or `DDPMScheduler`]): de-noising scheduler
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_input_dims(self) -> Tuple:
        """Returns dimension of input image

        Returns:
            `Tuple`: (height, width)
        """
        input_module = self.vqvae if self.vqvae is not None else self.unet
        # For backwards compatibility
        sample_size = (
            (input_module.sample_size, input_module.sample_size)
            if type(input_module.sample_size) == int
            else input_module.sample_size
        )
        return sample_size

    def get_default_steps(self) -> int:
        """Returns default number of steps recommended for inference

        Returns:
            `int`: number of steps
        """
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @paddle.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: paddle.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: paddle.Generator = None,
        eta: float = 0,
        noise: paddle.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]]
    ]:
        """Generate random mel spectrogram from audio input and convert to audio.

        Args:
            batch_size (`int`): number of samples to generate
            audio_file (`str`): must be a file on disk due to Librosa limitation or
            raw_audio (`np.ndarray`): audio as numpy array
            slice (`int`): slice number of audio to convert
            start_step (int): step to start from
            steps (`int`): number of de-noising steps (defaults to 50 for DDIM, 1000 for DDPM)
            generator (`paddle.Generator`): random number generator or None
            mask_start_secs (`float`): number of seconds of audio to mask (not generate) at start
            mask_end_secs (`float`): number of seconds of audio to mask (not generate) at end
            step_generator (`paddle.Generator`): random number generator used to de-noise or None
            eta (`float`): parameter between 0 and 1 used with DDIM scheduler
            noise (`paddle.Tensor`): noise tensor of shape (batch_size, 1, height, width) or None
            return_dict (`bool`): if True return AudioPipelineOutput, ImagePipelineOutput else Tuple

        Returns:
            `List[PIL Image]`: mel spectrograms (`float`, `List[np.ndarray]`): sample rate and raw audios
        """

        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.sample_size) == int:
            self.unet.sample_size = (self.unet.sample_size, self.unet.sample_size)
        input_dims = self.get_input_dims()
        self.mel.set_resolution(x_res=input_dims[1], y_res=input_dims[0])
        if noise is None:
            noise = paddle.randn(
                (batch_size, self.unet.in_channels, self.unet.sample_size[0], self.unet.sample_size[1]),
                generator=generator,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = paddle.to_tensor(input_image[np.newaxis, :, :], dtype=paddle.float32)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(paddle.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = 0.18215 * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, paddle.to_tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
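        # Note on the masking above: this is inpainting-style generation. `mask`
        # holds the original spectrogram re-noised once per remaining timestep,
        # so after every denoising step the first `mask_start` and last
        # `mask_end` pixel columns are reset to the correspondingly re-noised
        # original; only the unmasked span is actually generated.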
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / 0.18215 * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clip(0, 1)
        images = images.transpose([0, 2, 3, 1]).cast("float32").numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            map(lambda _: Image.fromarray(_[:, :, 0]), images)
            if images.shape[3] == 1
            else map(lambda _: Image.fromarray(_, mode="RGB").convert("L"), images)
        )

        audios = list(map(lambda _: self.mel.image_to_audio(_), images))
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @paddle.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse step process: recover noisy image from generated image.

        Args:
            images (`List[PIL Image]`): list of images to encode
            steps (`int`): number of encoding steps to perform (defaults to 50)

        Returns:
            `np.ndarray`: noise tensor of shape (batch_size, 1, height, width)
        """

        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = paddle.to_tensor(sample)

        for t in self.progress_bar(paddle.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output

        return sample
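    # The encode() loop above runs the deterministic DDIM update in reverse:
    # each iteration first recovers the model's x0 estimate from the less
    # noisy sample,
    #     x0 = (x_prev - sqrt(1 - alpha_prod_t_prev) * eps) / sqrt(alpha_prod_t_prev),
    # and then re-noises it to the current timestep,
    #     x_t = sqrt(alpha_prod_t) * x0 + sqrt(1 - alpha_prod_t) * eps,
    # with eps the UNet's noise prediction. Iterating over all timesteps maps
    # a generated image back to (an approximation of) its starting noise.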

    @staticmethod
    def slerp(x0: paddle.Tensor, x1: paddle.Tensor, alpha: float) -> paddle.Tensor:
        """Spherical Linear intERPolation

        Args:
            x0 (`paddle.Tensor`): first tensor to interpolate between
            x1 (`paddle.Tensor`): second tensor to interpolate between
            alpha (`float`): interpolation between 0 and 1

        Returns:
            `paddle.Tensor`: interpolated tensor
        """

        theta = acos(paddle.dot(paddle.flatten(x0), paddle.flatten(x1)) / paddle.norm(x0) / paddle.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
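        # Equivalently, with theta the angle between the flattened tensors:
        #     slerp(x0, x1, alpha) = sin((1 - alpha) * theta) / sin(theta) * x0
        #                          + sin(alpha * theta) / sin(theta) * x1
        # Interpolating noise tensors on the sphere rather than linearly keeps
        # the interpolants close to the norm Gaussian noise is expected to
        # have, which is why slerp is commonly used for mixing latents.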
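For orientation, a minimal usage sketch of the pipeline deleted above (a sketch only: it assumes AudioDiffusionPipeline is re-exported at the ppdiffusers top level, and "teticio/audio-diffusion-256" is purely an illustrative checkpoint id):

from ppdiffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
output = pipe(batch_size=1, steps=50)  # BaseOutput carrying both images and audios
images, (sample_rate, audios) = pipe(batch_size=1, return_dict=False)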
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r18.py
DELETED
@@ -1,26 +0,0 @@
from easydict import EasyDict as edict

# make training faster
# our RAM is 256G
# mount -t tmpfs -o size=140G tmpfs /train_tmp

config = edict()
config.loss = "arcface"
config.network = "r18"
config.resume = False
config.output = None
config.embedding_size = 512
config.sample_rate = 1.0
config.fp16 = True
config.momentum = 0.9
config.weight_decay = 5e-4
config.batch_size = 128
config.lr = 0.1  # batch size is 512

config.rec = "/train_tmp/ms1m-retinaface-t1"
config.num_classes = 93431
config.num_image = 5179510
config.num_epoch = 25
config.warmup_epoch = -1
config.decay_epoch = [10, 16, 22]
config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
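
# Note on config.lr: per the inline comment, 0.1 is tuned for a total batch of
# 512; with batch_size = 128 per GPU that corresponds to training on 4 GPUs.
# A plausible linear-scaling rule (an assumption, not stated in this file):
#     lr = 0.1 * total_batch_size / 512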
spaces/AI-Hobbyist/Hoyo-RVC/gui.py
DELETED
@@ -1,698 +0,0 @@
"""
Updates since 0416:
use the `half` setting from config
rebuild the npy instead of asking the user to fill it in
v2 support
support for models without f0
fixes

int16:
added support for running without an index
f0 algorithm changed to harvest (by all appearances the only thing that affects CPU usage), but quality is worse without this change
"""
import os, sys, traceback, re

import json

now_dir = os.getcwd()
sys.path.append(now_dir)
from config import Config

Config = Config()
import PySimpleGUI as sg
import sounddevice as sd
import noisereduce as nr
import numpy as np
from fairseq import checkpoint_utils
import librosa, torch, pyworld, faiss, time, threading
import torch.nn.functional as F
import torchaudio.transforms as tat
import scipy.signal as signal


# import matplotlib.pyplot as plt
from infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from i18n import I18nAuto

i18n = I18nAuto()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
current_dir = os.getcwd()


class RVC:
    def __init__(
        self, key, hubert_path, pth_path, index_path, npy_path, index_rate
    ) -> None:
        """
        Initialization
        """
        try:
            self.f0_up_key = key
            self.time_step = 160 / 16000 * 1000
            self.f0_min = 50
            self.f0_max = 1100
            self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
            self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
            self.sr = 16000
            self.window = 160
            if index_rate != 0:
                self.index = faiss.read_index(index_path)
                # self.big_npy = np.load(npy_path)
                self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
                print("index search enabled")
            self.index_rate = index_rate
            model_path = hubert_path
            print("load model(s) from {}".format(model_path))
            models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
                [model_path],
                suffix="",
            )
            self.model = models[0]
            self.model = self.model.to(device)
            if Config.is_half:
                self.model = self.model.half()
            else:
                self.model = self.model.float()
            self.model.eval()
            cpt = torch.load(pth_path, map_location="cpu")
            self.tgt_sr = cpt["config"][-1]
            cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
            self.if_f0 = cpt.get("f0", 1)
            self.version = cpt.get("version", "v1")
            if self.version == "v1":
                if self.if_f0 == 1:
                    self.net_g = SynthesizerTrnMs256NSFsid(
                        *cpt["config"], is_half=Config.is_half
                    )
                else:
                    self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
            elif self.version == "v2":
                if self.if_f0 == 1:
                    self.net_g = SynthesizerTrnMs768NSFsid(
                        *cpt["config"], is_half=Config.is_half
                    )
                else:
                    self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
            del self.net_g.enc_q
            print(self.net_g.load_state_dict(cpt["weight"], strict=False))
            self.net_g.eval().to(device)
            if Config.is_half:
                self.net_g = self.net_g.half()
            else:
                self.net_g = self.net_g.float()
        except:
            print(traceback.format_exc())

    def get_f0(self, x, f0_up_key, inp_f0=None):
        x_pad = 1
        f0_min = 50
        f0_max = 1100
        f0_mel_min = 1127 * np.log(1 + f0_min / 700)
        f0_mel_max = 1127 * np.log(1 + f0_max / 700)
        f0, t = pyworld.harvest(
            x.astype(np.double),
            fs=self.sr,
            f0_ceil=f0_max,
            f0_floor=f0_min,
            frame_period=10,
        )
        f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
        f0 = signal.medfilt(f0, 3)
        f0 *= pow(2, f0_up_key / 12)
        # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
        tf0 = self.sr // self.window  # number of f0 points per second
        if inp_f0 is not None:
            delta_t = np.round(
                (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
            ).astype("int16")
            replace_f0 = np.interp(
                list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
            )
            shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
            f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
        # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
        f0bak = f0.copy()
        f0_mel = 1127 * np.log(1 + f0 / 700)
        f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
            f0_mel_max - f0_mel_min
        ) + 1
        f0_mel[f0_mel <= 1] = 1
        f0_mel[f0_mel > 255] = 255
        f0_coarse = np.rint(f0_mel).astype(np.int)
        return f0_coarse, f0bak  # 1-0
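    # get_f0 above maps pitch onto a coarse 1..255 scale: harvest/stonemask
    # estimate f0 in Hz, the key shift multiplies by 2 ** (semitones / 12),
    # then f0_mel = 1127 * ln(1 + f0 / 700) is scaled linearly from
    # [f0_mel_min, f0_mel_max] onto [1, 255] (unvoiced frames stay at 1).
    # The unquantized f0 (f0bak) is returned alongside for the synthesizer.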
    def infer(self, feats: torch.Tensor) -> np.ndarray:
        """
        Inference
        """
        audio = feats.clone().cpu().numpy()
        assert feats.dim() == 1, feats.dim()
        feats = feats.view(1, -1)
        padding_mask = torch.BoolTensor(feats.shape).fill_(False)
        if Config.is_half:
            feats = feats.half()
        else:
            feats = feats.float()
        inputs = {
            "source": feats.to(device),
            "padding_mask": padding_mask.to(device),
            "output_layer": 9 if self.version == "v1" else 12,
        }
        torch.cuda.synchronize()
        with torch.no_grad():
            logits = self.model.extract_features(**inputs)
            feats = (
                self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
            )

        #### index-based retrieval (optimization)
        try:
            if (
                hasattr(self, "index")
                and hasattr(self, "big_npy")
                and self.index_rate != 0
            ):
                npy = feats[0].cpu().numpy().astype("float32")
                score, ix = self.index.search(npy, k=8)
                weight = np.square(1 / score)
                weight /= weight.sum(axis=1, keepdims=True)
                npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
                if Config.is_half:
                    npy = npy.astype("float16")
                feats = (
                    torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate
                    + (1 - self.index_rate) * feats
                )
            else:
                print("index search FAIL or disabled")
        except:
            traceback.print_exc()
            print("index search FAIL")
-
feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
|
197 |
-
torch.cuda.synchronize()
|
198 |
-
print(feats.shape)
|
199 |
-
if self.if_f0 == 1:
|
200 |
-
pitch, pitchf = self.get_f0(audio, self.f0_up_key)
|
201 |
-
p_len = min(feats.shape[1], 13000, pitch.shape[0]) # 太大了爆显存
|
202 |
-
else:
|
203 |
-
pitch, pitchf = None, None
|
204 |
-
p_len = min(feats.shape[1], 13000) # 太大了爆显存
|
205 |
-
torch.cuda.synchronize()
|
206 |
-
# print(feats.shape,pitch.shape)
|
207 |
-
feats = feats[:, :p_len, :]
|
208 |
-
if self.if_f0 == 1:
|
209 |
-
pitch = pitch[:p_len]
|
210 |
-
pitchf = pitchf[:p_len]
|
211 |
-
pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
|
212 |
-
pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
|
213 |
-
p_len = torch.LongTensor([p_len]).to(device)
|
214 |
-
ii = 0 # sid
|
215 |
-
sid = torch.LongTensor([ii]).to(device)
|
216 |
-
with torch.no_grad():
|
217 |
-
if self.if_f0 == 1:
|
218 |
-
infered_audio = (
|
219 |
-
self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
|
220 |
-
.data.cpu()
|
221 |
-
.float()
|
222 |
-
)
|
223 |
-
else:
|
224 |
-
infered_audio = (
|
225 |
-
self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float()
|
226 |
-
)
|
227 |
-
torch.cuda.synchronize()
|
228 |
-
return infered_audio
|
229 |
-
|
230 |
-
|
231 |
-
class GUIConfig:
|
232 |
-
def __init__(self) -> None:
|
233 |
-
self.hubert_path: str = ""
|
234 |
-
self.pth_path: str = ""
|
235 |
-
self.index_path: str = ""
|
236 |
-
self.npy_path: str = ""
|
237 |
-
self.pitch: int = 12
|
238 |
-
self.samplerate: int = 44100
|
239 |
-
self.block_time: float = 1.0 # s
|
240 |
-
self.buffer_num: int = 1
|
241 |
-
self.threhold: int = -30
|
242 |
-
self.crossfade_time: float = 0.08
|
243 |
-
self.extra_time: float = 0.04
|
244 |
-
self.I_noise_reduce = False
|
245 |
-
self.O_noise_reduce = False
|
246 |
-
self.index_rate = 0.3
|
247 |
-
|
248 |
-
|
249 |
-
class GUI:
|
250 |
-
def __init__(self) -> None:
|
251 |
-
self.config = GUIConfig()
|
252 |
-
self.flag_vc = False
|
253 |
-
|
254 |
-
self.launcher()
|
255 |
-
|
256 |
-
def load(self):
|
257 |
-
input_devices, output_devices, _, _ = self.get_devices()
|
258 |
-
try:
|
259 |
-
with open("values1.json", "r") as j:
|
260 |
-
data = json.load(j)
|
261 |
-
except:
|
262 |
-
with open("values1.json", "w") as j:
|
263 |
-
data = {
|
264 |
-
"pth_path": " ",
|
265 |
-
"index_path": " ",
|
266 |
-
"sg_input_device": input_devices[sd.default.device[0]],
|
267 |
-
"sg_output_device": output_devices[sd.default.device[1]],
|
268 |
-
"threhold": "-45",
|
269 |
-
"pitch": "0",
|
270 |
-
"index_rate": "0",
|
271 |
-
"block_time": "1",
|
272 |
-
"crossfade_length": "0.04",
|
273 |
-
"extra_time": "1",
|
274 |
-
}
|
275 |
-
return data
|
276 |
-
|
277 |
-
def launcher(self):
|
278 |
-
data = self.load()
|
279 |
-
sg.theme("LightBlue3")
|
280 |
-
input_devices, output_devices, _, _ = self.get_devices()
|
281 |
-
layout = [
|
282 |
-
[
|
283 |
-
sg.Frame(
|
284 |
-
title=i18n("加载模型"),
|
285 |
-
layout=[
|
286 |
-
[
|
287 |
-
sg.Input(
|
288 |
-
default_text="hubert_base.pt",
|
289 |
-
key="hubert_path",
|
290 |
-
disabled=True,
|
291 |
-
),
|
292 |
-
sg.FileBrowse(
|
293 |
-
i18n("Hubert模型"),
|
294 |
-
initial_folder=os.path.join(os.getcwd()),
|
295 |
-
file_types=((". pt"),),
|
296 |
-
),
|
297 |
-
],
|
298 |
-
[
|
299 |
-
sg.Input(
|
300 |
-
default_text=data.get("pth_path", ""),
|
301 |
-
key="pth_path",
|
302 |
-
),
|
303 |
-
sg.FileBrowse(
|
304 |
-
i18n("选择.pth文件"),
|
305 |
-
initial_folder=os.path.join(os.getcwd(), "weights"),
|
306 |
-
file_types=((". pth"),),
|
307 |
-
),
|
308 |
-
],
|
309 |
-
[
|
310 |
-
sg.Input(
|
311 |
-
default_text=data.get("index_path", ""),
|
312 |
-
key="index_path",
|
313 |
-
),
|
314 |
-
sg.FileBrowse(
|
315 |
-
i18n("选择.index文件"),
|
316 |
-
initial_folder=os.path.join(os.getcwd(), "logs"),
|
317 |
-
file_types=((". index"),),
|
318 |
-
),
|
319 |
-
],
|
320 |
-
[
|
321 |
-
sg.Input(
|
322 |
-
default_text="你不需要填写这个You don't need write this.",
|
323 |
-
key="npy_path",
|
324 |
-
disabled=True,
|
325 |
-
),
|
326 |
-
sg.FileBrowse(
|
327 |
-
i18n("选择.npy文件"),
|
328 |
-
initial_folder=os.path.join(os.getcwd(), "logs"),
|
329 |
-
file_types=((". npy"),),
|
330 |
-
),
|
331 |
-
],
|
332 |
-
],
|
333 |
-
)
|
334 |
-
],
|
335 |
-
[
|
336 |
-
sg.Frame(
|
337 |
-
layout=[
|
338 |
-
[
|
339 |
-
sg.Text(i18n("输入设备")),
|
340 |
-
sg.Combo(
|
341 |
-
input_devices,
|
342 |
-
key="sg_input_device",
|
343 |
-
default_value=data.get("sg_input_device", ""),
|
344 |
-
),
|
345 |
-
],
|
346 |
-
[
|
347 |
-
sg.Text(i18n("输出设备")),
|
348 |
-
sg.Combo(
|
349 |
-
output_devices,
|
350 |
-
key="sg_output_device",
|
351 |
-
default_value=data.get("sg_output_device", ""),
|
352 |
-
),
|
353 |
-
],
|
354 |
-
],
|
355 |
-
title=i18n("音频设备(请使用同种类驱动)"),
|
356 |
-
)
|
357 |
-
],
|
358 |
-
[
|
359 |
-
sg.Frame(
|
360 |
-
layout=[
|
361 |
-
[
|
362 |
-
sg.Text(i18n("响应阈值")),
|
363 |
-
sg.Slider(
|
364 |
-
range=(-60, 0),
|
365 |
-
key="threhold",
|
366 |
-
resolution=1,
|
367 |
-
orientation="h",
|
368 |
-
default_value=data.get("threhold", ""),
|
369 |
-
),
|
370 |
-
],
|
371 |
-
[
|
372 |
-
sg.Text(i18n("音调设置")),
|
373 |
-
sg.Slider(
|
374 |
-
range=(-24, 24),
|
375 |
-
key="pitch",
|
376 |
-
resolution=1,
|
377 |
-
orientation="h",
|
378 |
-
default_value=data.get("pitch", ""),
|
379 |
-
),
|
380 |
-
],
|
381 |
-
[
|
382 |
-
sg.Text(i18n("Index Rate")),
|
383 |
-
sg.Slider(
|
384 |
-
range=(0.0, 1.0),
|
385 |
-
key="index_rate",
|
386 |
-
resolution=0.01,
|
387 |
-
orientation="h",
|
388 |
-
default_value=data.get("index_rate", ""),
|
389 |
-
),
|
390 |
-
],
|
391 |
-
],
|
392 |
-
title=i18n("常规设置"),
|
393 |
-
),
|
394 |
-
sg.Frame(
|
395 |
-
layout=[
|
396 |
-
[
|
397 |
-
sg.Text(i18n("采样长度")),
|
398 |
-
sg.Slider(
|
399 |
-
range=(0.1, 3.0),
|
400 |
-
key="block_time",
|
401 |
-
resolution=0.1,
|
402 |
-
orientation="h",
|
403 |
-
default_value=data.get("block_time", ""),
|
404 |
-
),
|
405 |
-
],
|
406 |
-
[
|
407 |
-
sg.Text(i18n("淡入淡出长度")),
|
408 |
-
sg.Slider(
|
409 |
-
range=(0.01, 0.15),
|
410 |
-
key="crossfade_length",
|
411 |
-
resolution=0.01,
|
412 |
-
orientation="h",
|
413 |
-
default_value=data.get("crossfade_length", ""),
|
414 |
-
),
|
415 |
-
],
|
416 |
-
[
|
417 |
-
sg.Text(i18n("额外推理时长")),
|
418 |
-
sg.Slider(
|
419 |
-
range=(0.05, 3.00),
|
420 |
-
key="extra_time",
|
421 |
-
resolution=0.01,
|
422 |
-
orientation="h",
|
423 |
-
default_value=data.get("extra_time", ""),
|
424 |
-
),
|
425 |
-
],
|
426 |
-
[
|
427 |
-
sg.Checkbox(i18n("输入降噪"), key="I_noise_reduce"),
|
428 |
-
sg.Checkbox(i18n("输出降噪"), key="O_noise_reduce"),
|
429 |
-
],
|
430 |
-
],
|
431 |
-
title=i18n("性能设置"),
|
432 |
-
),
|
433 |
-
],
|
434 |
-
[
|
435 |
-
sg.Button(i18n("开始音频转换"), key="start_vc"),
|
436 |
-
sg.Button(i18n("停止音频转换"), key="stop_vc"),
|
437 |
-
sg.Text(i18n("推理时间(ms):")),
|
438 |
-
sg.Text("0", key="infer_time"),
|
439 |
-
],
|
440 |
-
]
|
441 |
-
self.window = sg.Window("RVC - GUI", layout=layout)
|
442 |
-
self.event_handler()
|
443 |
-
|
444 |
-
def event_handler(self):
|
445 |
-
while True:
|
446 |
-
event, values = self.window.read()
|
447 |
-
if event == sg.WINDOW_CLOSED:
|
448 |
-
self.flag_vc = False
|
449 |
-
exit()
|
450 |
-
if event == "start_vc" and self.flag_vc == False:
|
451 |
-
if self.set_values(values) == True:
|
452 |
-
print("using_cuda:" + str(torch.cuda.is_available()))
|
453 |
-
self.start_vc()
|
454 |
-
settings = {
|
455 |
-
"pth_path": values["pth_path"],
|
456 |
-
"index_path": values["index_path"],
|
457 |
-
"sg_input_device": values["sg_input_device"],
|
458 |
-
"sg_output_device": values["sg_output_device"],
|
459 |
-
"threhold": values["threhold"],
|
460 |
-
"pitch": values["pitch"],
|
461 |
-
"index_rate": values["index_rate"],
|
462 |
-
"block_time": values["block_time"],
|
463 |
-
"crossfade_length": values["crossfade_length"],
|
464 |
-
"extra_time": values["extra_time"],
|
465 |
-
}
|
466 |
-
with open("values1.json", "w") as j:
|
467 |
-
json.dump(settings, j)
|
468 |
-
if event == "stop_vc" and self.flag_vc == True:
|
469 |
-
self.flag_vc = False
|
470 |
-
|
471 |
-
def set_values(self, values):
|
472 |
-
if len(values["pth_path"].strip()) == 0:
|
473 |
-
sg.popup(i18n("请选择pth文件"))
|
474 |
-
return False
|
475 |
-
if len(values["index_path"].strip()) == 0:
|
476 |
-
sg.popup(i18n("请选择index文件"))
|
477 |
-
return False
|
478 |
-
pattern = re.compile("[^\x00-\x7F]+")
|
479 |
-
if pattern.findall(values["hubert_path"]):
|
480 |
-
sg.popup(i18n("hubert模型路径不可包含中文"))
|
481 |
-
return False
|
482 |
-
if pattern.findall(values["pth_path"]):
|
483 |
-
sg.popup(i18n("pth文件路径不可包含中文"))
|
484 |
-
return False
|
485 |
-
if pattern.findall(values["index_path"]):
|
486 |
-
sg.popup(i18n("index文件路径不可包含中文"))
|
487 |
-
return False
|
488 |
-
self.set_devices(values["sg_input_device"], values["sg_output_device"])
|
489 |
-
self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt")
|
490 |
-
self.config.pth_path = values["pth_path"]
|
491 |
-
self.config.index_path = values["index_path"]
|
492 |
-
self.config.npy_path = values["npy_path"]
|
493 |
-
self.config.threhold = values["threhold"]
|
494 |
-
self.config.pitch = values["pitch"]
|
495 |
-
self.config.block_time = values["block_time"]
|
496 |
-
self.config.crossfade_time = values["crossfade_length"]
|
497 |
-
self.config.extra_time = values["extra_time"]
|
498 |
-
self.config.I_noise_reduce = values["I_noise_reduce"]
|
499 |
-
self.config.O_noise_reduce = values["O_noise_reduce"]
|
500 |
-
self.config.index_rate = values["index_rate"]
|
501 |
-
return True
|
502 |
-
|
503 |
-
def start_vc(self):
|
504 |
-
torch.cuda.empty_cache()
|
505 |
-
self.flag_vc = True
|
506 |
-
self.block_frame = int(self.config.block_time * self.config.samplerate)
|
507 |
-
self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate)
|
508 |
-
self.sola_search_frame = int(0.012 * self.config.samplerate)
|
509 |
-
self.delay_frame = int(0.01 * self.config.samplerate) # 往前预留0.02s
|
510 |
-
self.extra_frame = int(self.config.extra_time * self.config.samplerate)
|
511 |
-
self.rvc = None
|
512 |
-
self.rvc = RVC(
|
513 |
-
self.config.pitch,
|
514 |
-
self.config.hubert_path,
|
515 |
-
self.config.pth_path,
|
516 |
-
self.config.index_path,
|
517 |
-
self.config.npy_path,
|
518 |
-
self.config.index_rate,
|
519 |
-
)
|
520 |
-
self.input_wav: np.ndarray = np.zeros(
|
521 |
-
self.extra_frame
|
522 |
-
+ self.crossfade_frame
|
523 |
-
+ self.sola_search_frame
|
524 |
-
+ self.block_frame,
|
525 |
-
dtype="float32",
|
526 |
-
)
|
527 |
-
self.output_wav: torch.Tensor = torch.zeros(
|
528 |
-
self.block_frame, device=device, dtype=torch.float32
|
529 |
-
)
|
530 |
-
self.sola_buffer: torch.Tensor = torch.zeros(
|
531 |
-
self.crossfade_frame, device=device, dtype=torch.float32
|
532 |
-
)
|
533 |
-
self.fade_in_window: torch.Tensor = torch.linspace(
|
534 |
-
0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32
|
535 |
-
)
|
536 |
-
self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
|
537 |
-
self.resampler1 = tat.Resample(
|
538 |
-
orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
|
539 |
-
)
|
540 |
-
self.resampler2 = tat.Resample(
|
541 |
-
orig_freq=self.rvc.tgt_sr,
|
542 |
-
new_freq=self.config.samplerate,
|
543 |
-
dtype=torch.float32,
|
544 |
-
)
|
545 |
-
thread_vc = threading.Thread(target=self.soundinput)
|
546 |
-
thread_vc.start()
|
547 |
-
|
548 |
-
def soundinput(self):
|
549 |
-
"""
|
550 |
-
接受音频输入
|
551 |
-
"""
|
552 |
-
with sd.Stream(
|
553 |
-
callback=self.audio_callback,
|
554 |
-
blocksize=self.block_frame,
|
555 |
-
samplerate=self.config.samplerate,
|
556 |
-
dtype="float32",
|
557 |
-
):
|
558 |
-
while self.flag_vc:
|
559 |
-
time.sleep(self.config.block_time)
|
560 |
-
print("Audio block passed.")
|
561 |
-
print("ENDing VC")
|
562 |
-
|
563 |
-
def audio_callback(
|
564 |
-
self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
|
565 |
-
):
|
566 |
-
"""
|
567 |
-
音频处理
|
568 |
-
"""
|
569 |
-
start_time = time.perf_counter()
|
570 |
-
indata = librosa.to_mono(indata.T)
|
571 |
-
if self.config.I_noise_reduce:
|
572 |
-
indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate)
|
573 |
-
|
574 |
-
"""noise gate"""
|
575 |
-
frame_length = 2048
|
576 |
-
hop_length = 1024
|
577 |
-
rms = librosa.feature.rms(
|
578 |
-
y=indata, frame_length=frame_length, hop_length=hop_length
|
579 |
-
)
|
580 |
-
db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
|
581 |
-
# print(rms.shape,db.shape,db)
|
582 |
-
for i in range(db_threhold.shape[0]):
|
583 |
-
if db_threhold[i]:
|
584 |
-
indata[i * hop_length : (i + 1) * hop_length] = 0
|
585 |
-
self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata)
|
586 |
-
|
587 |
-
# infer
|
588 |
-
print("input_wav:" + str(self.input_wav.shape))
|
589 |
-
# print('infered_wav:'+str(infer_wav.shape))
|
590 |
-
infer_wav: torch.Tensor = self.resampler2(
|
591 |
-
self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav)))
|
592 |
-
)[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to(
|
593 |
-
device
|
594 |
-
)
|
595 |
-
print("infer_wav:" + str(infer_wav.shape))
|
596 |
-
|
597 |
-
# SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
|
598 |
-
cor_nom = F.conv1d(
|
599 |
-
infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame],
|
600 |
-
self.sola_buffer[None, None, :],
|
601 |
-
)
|
602 |
-
cor_den = torch.sqrt(
|
603 |
-
F.conv1d(
|
604 |
-
infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
|
605 |
-
** 2,
|
606 |
-
torch.ones(1, 1, self.crossfade_frame, device=device),
|
607 |
-
)
|
608 |
-
+ 1e-8
|
609 |
-
)
|
610 |
-
sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
|
611 |
-
print("sola offset: " + str(int(sola_offset)))
|
612 |
-
|
613 |
-
# crossfade
|
614 |
-
self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
|
615 |
-
self.output_wav[: self.crossfade_frame] *= self.fade_in_window
|
616 |
-
self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
|
617 |
-
if sola_offset < self.sola_search_frame:
|
618 |
-
self.sola_buffer[:] = (
|
619 |
-
infer_wav[
|
620 |
-
-self.sola_search_frame
|
621 |
-
- self.crossfade_frame
|
622 |
-
+ sola_offset : -self.sola_search_frame
|
623 |
-
+ sola_offset
|
624 |
-
]
|
625 |
-
* self.fade_out_window
|
626 |
-
)
|
627 |
-
else:
|
628 |
-
self.sola_buffer[:] = (
|
629 |
-
infer_wav[-self.crossfade_frame :] * self.fade_out_window
|
630 |
-
)
|
631 |
-
|
632 |
-
if self.config.O_noise_reduce:
|
633 |
-
outdata[:] = np.tile(
|
634 |
-
nr.reduce_noise(
|
635 |
-
y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate
|
636 |
-
),
|
637 |
-
(2, 1),
|
638 |
-
).T
|
639 |
-
else:
|
640 |
-
outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
|
641 |
-
total_time = time.perf_counter() - start_time
|
642 |
-
self.window["infer_time"].update(int(total_time * 1000))
|
643 |
-
print("infer time:" + str(total_time))
|
644 |
-
|
645 |
-
def get_devices(self, update: bool = True):
|
646 |
-
"""获取设备列表"""
|
647 |
-
if update:
|
648 |
-
sd._terminate()
|
649 |
-
sd._initialize()
|
650 |
-
devices = sd.query_devices()
|
651 |
-
hostapis = sd.query_hostapis()
|
652 |
-
for hostapi in hostapis:
|
653 |
-
for device_idx in hostapi["devices"]:
|
654 |
-
devices[device_idx]["hostapi_name"] = hostapi["name"]
|
655 |
-
input_devices = [
|
656 |
-
f"{d['name']} ({d['hostapi_name']})"
|
657 |
-
for d in devices
|
658 |
-
if d["max_input_channels"] > 0
|
659 |
-
]
|
660 |
-
output_devices = [
|
661 |
-
f"{d['name']} ({d['hostapi_name']})"
|
662 |
-
for d in devices
|
663 |
-
if d["max_output_channels"] > 0
|
664 |
-
]
|
665 |
-
input_devices_indices = [
|
666 |
-
d["index"] if "index" in d else d["name"]
|
667 |
-
for d in devices
|
668 |
-
if d["max_input_channels"] > 0
|
669 |
-
]
|
670 |
-
output_devices_indices = [
|
671 |
-
d["index"] if "index" in d else d["name"]
|
672 |
-
for d in devices
|
673 |
-
if d["max_output_channels"] > 0
|
674 |
-
]
|
675 |
-
return (
|
676 |
-
input_devices,
|
677 |
-
output_devices,
|
678 |
-
input_devices_indices,
|
679 |
-
output_devices_indices,
|
680 |
-
)
|
681 |
-
|
682 |
-
def set_devices(self, input_device, output_device):
|
683 |
-
"""设置输出设备"""
|
684 |
-
(
|
685 |
-
input_devices,
|
686 |
-
output_devices,
|
687 |
-
input_device_indices,
|
688 |
-
output_device_indices,
|
689 |
-
) = self.get_devices()
|
690 |
-
sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
|
691 |
-
sd.default.device[1] = output_device_indices[
|
692 |
-
output_devices.index(output_device)
|
693 |
-
]
|
694 |
-
print("input device:" + str(sd.default.device[0]) + ":" + str(input_device))
|
695 |
-
print("output device:" + str(sd.default.device[1]) + ":" + str(output_device))
|
696 |
-
|
697 |
-
|
698 |
-
gui = GUI()
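
Note on the file above: the deleted audio_callback aligns each newly inferred block against the saved tail of the previous block (the SOLA step) before crossfading, which is what keeps the real-time output click-free. The following self-contained sketch reproduces just that alignment and crossfade; the buffer lengths and the names prev_tail/new_chunk are illustrative, not taken from the original file.

import torch
import torch.nn.functional as F

crossfade, search, block = 160, 48, 480              # illustrative frame counts
prev_tail = torch.randn(crossfade)                   # tail saved from the previous block
new_chunk = torch.randn(crossfade + search + block)  # freshly inferred audio

# Normalized cross-correlation over the search region, as in audio_callback.
head = new_chunk[None, None, : crossfade + search]
cor_nom = F.conv1d(head, prev_tail[None, None, :])
cor_den = torch.sqrt(F.conv1d(head ** 2, torch.ones(1, 1, crossfade)) + 1e-8)
offset = int(torch.argmax(cor_nom[0, 0] / cor_den[0, 0]))  # best splice point in [0, search]

# Linear crossfade between the aligned region and the saved tail.
fade_in = torch.linspace(0.0, 1.0, steps=crossfade)
out = new_chunk[offset : offset + block].clone()
out[:crossfade] = out[:crossfade] * fade_in + prev_tail * (1.0 - fade_in)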
spaces/AI-Hobbyist/Hoyo-RVC/slicer2.py
DELETED
@@ -1,260 +0,0 @@
-import numpy as np
-
-
-# This function is obtained from librosa.
-def get_rms(
-    y,
-    frame_length=2048,
-    hop_length=512,
-    pad_mode="constant",
-):
-    padding = (int(frame_length // 2), int(frame_length // 2))
-    y = np.pad(y, padding, mode=pad_mode)
-
-    axis = -1
-    # put our new within-frame axis at the end for now
-    out_strides = y.strides + tuple([y.strides[axis]])
-    # Reduce the shape on the framing axis
-    x_shape_trimmed = list(y.shape)
-    x_shape_trimmed[axis] -= frame_length - 1
-    out_shape = tuple(x_shape_trimmed) + tuple([frame_length])
-    xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides)
-    if axis < 0:
-        target_axis = axis - 1
-    else:
-        target_axis = axis + 1
-    xw = np.moveaxis(xw, -1, target_axis)
-    # Downsample along the target axis
-    slices = [slice(None)] * xw.ndim
-    slices[axis] = slice(0, None, hop_length)
-    x = xw[tuple(slices)]
-
-    # Calculate power
-    power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True)
-
-    return np.sqrt(power)
-
-
-class Slicer:
-    def __init__(
-        self,
-        sr: int,
-        threshold: float = -40.0,
-        min_length: int = 5000,
-        min_interval: int = 300,
-        hop_size: int = 20,
-        max_sil_kept: int = 5000,
-    ):
-        if not min_length >= min_interval >= hop_size:
-            raise ValueError(
-                "The following condition must be satisfied: min_length >= min_interval >= hop_size"
-            )
-        if not max_sil_kept >= hop_size:
-            raise ValueError(
-                "The following condition must be satisfied: max_sil_kept >= hop_size"
-            )
-        min_interval = sr * min_interval / 1000
-        self.threshold = 10 ** (threshold / 20.0)
-        self.hop_size = round(sr * hop_size / 1000)
-        self.win_size = min(round(min_interval), 4 * self.hop_size)
-        self.min_length = round(sr * min_length / 1000 / self.hop_size)
-        self.min_interval = round(min_interval / self.hop_size)
-        self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size)
-
-    def _apply_slice(self, waveform, begin, end):
-        if len(waveform.shape) > 1:
-            return waveform[
-                :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size)
-            ]
-        else:
-            return waveform[
-                begin * self.hop_size : min(waveform.shape[0], end * self.hop_size)
-            ]
-
-    # @timeit
-    def slice(self, waveform):
-        if len(waveform.shape) > 1:
-            samples = waveform.mean(axis=0)
-        else:
-            samples = waveform
-        if samples.shape[0] <= self.min_length:
-            return [waveform]
-        rms_list = get_rms(
-            y=samples, frame_length=self.win_size, hop_length=self.hop_size
-        ).squeeze(0)
-        sil_tags = []
-        silence_start = None
-        clip_start = 0
-        for i, rms in enumerate(rms_list):
-            # Keep looping while frame is silent.
-            if rms < self.threshold:
-                # Record start of silent frames.
-                if silence_start is None:
-                    silence_start = i
-                continue
-            # Keep looping while frame is not silent and silence start has not been recorded.
-            if silence_start is None:
-                continue
-            # Clear recorded silence start if interval is not enough or clip is too short
-            is_leading_silence = silence_start == 0 and i > self.max_sil_kept
-            need_slice_middle = (
-                i - silence_start >= self.min_interval
-                and i - clip_start >= self.min_length
-            )
-            if not is_leading_silence and not need_slice_middle:
-                silence_start = None
-                continue
-            # Need slicing. Record the range of silent frames to be removed.
-            if i - silence_start <= self.max_sil_kept:
-                pos = rms_list[silence_start : i + 1].argmin() + silence_start
-                if silence_start == 0:
-                    sil_tags.append((0, pos))
-                else:
-                    sil_tags.append((pos, pos))
-                clip_start = pos
-            elif i - silence_start <= self.max_sil_kept * 2:
-                pos = rms_list[
-                    i - self.max_sil_kept : silence_start + self.max_sil_kept + 1
-                ].argmin()
-                pos += i - self.max_sil_kept
-                pos_l = (
-                    rms_list[
-                        silence_start : silence_start + self.max_sil_kept + 1
-                    ].argmin()
-                    + silence_start
-                )
-                pos_r = (
-                    rms_list[i - self.max_sil_kept : i + 1].argmin()
-                    + i
-                    - self.max_sil_kept
-                )
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                    clip_start = pos_r
-                else:
-                    sil_tags.append((min(pos_l, pos), max(pos_r, pos)))
-                    clip_start = max(pos_r, pos)
-            else:
-                pos_l = (
-                    rms_list[
-                        silence_start : silence_start + self.max_sil_kept + 1
-                    ].argmin()
-                    + silence_start
-                )
-                pos_r = (
-                    rms_list[i - self.max_sil_kept : i + 1].argmin()
-                    + i
-                    - self.max_sil_kept
-                )
-                if silence_start == 0:
-                    sil_tags.append((0, pos_r))
-                else:
-                    sil_tags.append((pos_l, pos_r))
-                clip_start = pos_r
-            silence_start = None
-        # Deal with trailing silence.
-        total_frames = rms_list.shape[0]
-        if (
-            silence_start is not None
-            and total_frames - silence_start >= self.min_interval
-        ):
-            silence_end = min(total_frames, silence_start + self.max_sil_kept)
-            pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start
-            sil_tags.append((pos, total_frames + 1))
-        # Apply and return slices.
-        if len(sil_tags) == 0:
-            return [waveform]
-        else:
-            chunks = []
-            if sil_tags[0][0] > 0:
-                chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0]))
-            for i in range(len(sil_tags) - 1):
-                chunks.append(
-                    self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])
-                )
-            if sil_tags[-1][1] < total_frames:
-                chunks.append(
-                    self._apply_slice(waveform, sil_tags[-1][1], total_frames)
-                )
-            return chunks
-
-
-def main():
-    import os.path
-    from argparse import ArgumentParser
-
-    import librosa
-    import soundfile
-
-    parser = ArgumentParser()
-    parser.add_argument("audio", type=str, help="The audio to be sliced")
-    parser.add_argument(
-        "--out", type=str, help="Output directory of the sliced audio clips"
-    )
-    parser.add_argument(
-        "--db_thresh",
-        type=float,
-        required=False,
-        default=-40,
-        help="The dB threshold for silence detection",
-    )
-    parser.add_argument(
-        "--min_length",
-        type=int,
-        required=False,
-        default=5000,
-        help="The minimum milliseconds required for each sliced audio clip",
-    )
-    parser.add_argument(
-        "--min_interval",
-        type=int,
-        required=False,
-        default=300,
-        help="The minimum milliseconds for a silence part to be sliced",
-    )
-    parser.add_argument(
-        "--hop_size",
-        type=int,
-        required=False,
-        default=10,
-        help="Frame length in milliseconds",
-    )
-    parser.add_argument(
-        "--max_sil_kept",
-        type=int,
-        required=False,
-        default=500,
-        help="The maximum silence length kept around the sliced clip, presented in milliseconds",
-    )
-    args = parser.parse_args()
-    out = args.out
-    if out is None:
-        out = os.path.dirname(os.path.abspath(args.audio))
-    audio, sr = librosa.load(args.audio, sr=None, mono=False)
-    slicer = Slicer(
-        sr=sr,
-        threshold=args.db_thresh,
-        min_length=args.min_length,
-        min_interval=args.min_interval,
-        hop_size=args.hop_size,
-        max_sil_kept=args.max_sil_kept,
-    )
-    chunks = slicer.slice(audio)
-    if not os.path.exists(out):
-        os.makedirs(out)
-    for i, chunk in enumerate(chunks):
-        if len(chunk.shape) > 1:
-            chunk = chunk.T
-        soundfile.write(
-            os.path.join(
-                out,
-                f"%s_%d.wav"
-                % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i),
-            ),
-            chunk,
-            sr,
-        )
-
-
-if __name__ == "__main__":
-    main()
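
A small usage sketch for the Slicer deleted above, assuming slicer2.py is importable as a module; the input is synthetic (noise with a one-second silent gap), so the resulting chunk count is illustrative only.

import numpy as np
from slicer2 import Slicer

sr = 44100
audio = np.concatenate([
    0.3 * np.random.randn(sr),   # one second of "voiced" noise
    np.zeros(sr),                # one second of silence
    0.3 * np.random.randn(sr),
]).astype(np.float32)

# threshold is given in dBFS; internally it becomes linear amplitude:
# 10 ** (-40 / 20) == 0.01.
slicer = Slicer(sr=sr, threshold=-40.0, min_length=500,
                min_interval=300, hop_size=20, max_sil_kept=500)
chunks = slicer.slice(audio)
print(f"{len(chunks)} chunk(s)")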
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/data_gen_utils.py
DELETED
@@ -1,357 +0,0 @@
-import warnings
-
-warnings.filterwarnings("ignore")
-
-import parselmouth
-import os
-import torch
-from skimage.transform import resize
-from utils.text_encoder import TokenTextEncoder
-from utils.pitch_utils import f0_to_coarse
-import struct
-import webrtcvad
-from scipy.ndimage.morphology import binary_dilation
-import librosa
-import numpy as np
-from utils import audio
-import pyloudnorm as pyln
-import re
-import json
-from collections import OrderedDict
-
-PUNCS = '!,.?;:'
-
-int16_max = (2 ** 15) - 1
-
-
-def trim_long_silences(path, sr=None, return_raw_wav=False, norm=True, vad_max_silence_length=12):
-    """
-    Ensures that segments without voice in the waveform remain no longer than a
-    threshold determined by the VAD parameters in params.py.
-    :param wav: the raw waveform as a numpy array of floats
-    :param vad_max_silence_length: Maximum number of consecutive silent frames a segment can have.
-    :return: the same waveform with silences trimmed away (length <= original wav length)
-    """
-
-    ## Voice Activation Detection
-    # Window size of the VAD. Must be either 10, 20 or 30 milliseconds.
-    # This sets the granularity of the VAD. Should not need to be changed.
-    sampling_rate = 16000
-    wav_raw, sr = librosa.core.load(path, sr=sr)
-
-    if norm:
-        meter = pyln.Meter(sr)  # create BS.1770 meter
-        loudness = meter.integrated_loudness(wav_raw)
-        wav_raw = pyln.normalize.loudness(wav_raw, loudness, -20.0)
-        if np.abs(wav_raw).max() > 1.0:
-            wav_raw = wav_raw / np.abs(wav_raw).max()
-
-    wav = librosa.resample(wav_raw, sr, sampling_rate, res_type='kaiser_best')
-
-    vad_window_length = 30  # In milliseconds
-    # Number of frames to average together when performing the moving average smoothing.
-    # The larger this value, the larger the VAD variations must be to not get smoothed out.
-    vad_moving_average_width = 8
-
-    # Compute the voice detection window size
-    samples_per_window = (vad_window_length * sampling_rate) // 1000
-
-    # Trim the end of the audio to have a multiple of the window size
-    wav = wav[:len(wav) - (len(wav) % samples_per_window)]
-
-    # Convert the float waveform to 16-bit mono PCM
-    pcm_wave = struct.pack("%dh" % len(wav), *(np.round(wav * int16_max)).astype(np.int16))
-
-    # Perform voice activation detection
-    voice_flags = []
-    vad = webrtcvad.Vad(mode=3)
-    for window_start in range(0, len(wav), samples_per_window):
-        window_end = window_start + samples_per_window
-        voice_flags.append(vad.is_speech(pcm_wave[window_start * 2:window_end * 2],
-                                         sample_rate=sampling_rate))
-    voice_flags = np.array(voice_flags)
-
-    # Smooth the voice detection with a moving average
-    def moving_average(array, width):
-        array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))
-        ret = np.cumsum(array_padded, dtype=float)
-        ret[width:] = ret[width:] - ret[:-width]
-        return ret[width - 1:] / width
-
-    audio_mask = moving_average(voice_flags, vad_moving_average_width)
-    audio_mask = np.round(audio_mask).astype(np.bool)
-
-    # Dilate the voiced regions
-    audio_mask = binary_dilation(audio_mask, np.ones(vad_max_silence_length + 1))
-    audio_mask = np.repeat(audio_mask, samples_per_window)
-    audio_mask = resize(audio_mask, (len(wav_raw),)) > 0
-    if return_raw_wav:
-        return wav_raw, audio_mask, sr
-    return wav_raw[audio_mask], audio_mask, sr
-
-
-def process_utterance(wav_path,
-                      fft_size=1024,
-                      hop_size=256,
-                      win_length=1024,
-                      window="hann",
-                      num_mels=80,
-                      fmin=80,
-                      fmax=7600,
-                      eps=1e-6,
-                      sample_rate=22050,
-                      loud_norm=False,
-                      min_level_db=-100,
-                      return_linear=False,
-                      trim_long_sil=False, vocoder='pwg'):
-    if isinstance(wav_path, str):
-        if trim_long_sil:
-            wav, _, _ = trim_long_silences(wav_path, sample_rate)
-        else:
-            wav, _ = librosa.core.load(wav_path, sr=sample_rate)
-    else:
-        wav = wav_path
-
-    if loud_norm:
-        meter = pyln.Meter(sample_rate)  # create BS.1770 meter
-        loudness = meter.integrated_loudness(wav)
-        wav = pyln.normalize.loudness(wav, loudness, -22.0)
-        if np.abs(wav).max() > 1:
-            wav = wav / np.abs(wav).max()
-
-    # get amplitude spectrogram
-    x_stft = librosa.stft(wav, n_fft=fft_size, hop_length=hop_size,
-                          win_length=win_length, window=window, pad_mode="constant")
-    spc = np.abs(x_stft)  # (n_bins, T)
-
-    # get mel basis
-    fmin = 0 if fmin == -1 else fmin
-    fmax = sample_rate / 2 if fmax == -1 else fmax
-    mel_basis = librosa.filters.mel(sample_rate, fft_size, num_mels, fmin, fmax)
-    mel = mel_basis @ spc
-
-    if vocoder == 'pwg':
-        mel = np.log10(np.maximum(eps, mel))  # (n_mel_bins, T)
-    else:
-        assert False, f'"{vocoder}" is not in ["pwg"].'
-
-    l_pad, r_pad = audio.librosa_pad_lr(wav, fft_size, hop_size, 1)
-    wav = np.pad(wav, (l_pad, r_pad), mode='constant', constant_values=0.0)
-    wav = wav[:mel.shape[1] * hop_size]
-
-    if not return_linear:
-        return wav, mel
-    else:
-        spc = audio.amp_to_db(spc)
-        spc = audio.normalize(spc, {'min_level_db': min_level_db})
-        return wav, mel, spc
-
-
-def get_pitch(wav_data, mel, hparams):
-    """
-
-    :param wav_data: [T]
-    :param mel: [T, 80]
-    :param hparams:
-    :return:
-    """
-    time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000
-    f0_min = 80
-    f0_max = 750
-
-    if hparams['hop_size'] == 128:
-        pad_size = 4
-    elif hparams['hop_size'] == 256:
-        pad_size = 2
-    else:
-        assert False
-
-    f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac(
-        time_step=time_step / 1000, voicing_threshold=0.6,
-        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
-    lpad = pad_size * 2
-    rpad = len(mel) - len(f0) - lpad
-    f0 = np.pad(f0, [[lpad, rpad]], mode='constant')
-    # mel and f0 are extracted by 2 different libraries. we should force them to have the same length.
-    # Attention: we find that new version of some libraries could cause ``rpad'' to be a negative value...
-    # Just to be sure, we recommend users to set up the same environments as them in requirements_auto.txt (by Anaconda)
-    delta_l = len(mel) - len(f0)
-    assert np.abs(delta_l) <= 8
-    if delta_l > 0:
-        f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0)
-    f0 = f0[:len(mel)]
-    pitch_coarse = f0_to_coarse(f0)
-    return f0, pitch_coarse
-
-
-def remove_empty_lines(text):
-    """remove empty lines"""
-    assert (len(text) > 0)
-    assert (isinstance(text, list))
-    text = [t.strip() for t in text]
-    if "" in text:
-        text.remove("")
-    return text
-
-
-class TextGrid(object):
-    def __init__(self, text):
-        text = remove_empty_lines(text)
-        self.text = text
-        self.line_count = 0
-        self._get_type()
-        self._get_time_intval()
-        self._get_size()
-        self.tier_list = []
-        self._get_item_list()
-
-    def _extract_pattern(self, pattern, inc):
-        """
-        Parameters
-        ----------
-        pattern : regex to extract pattern
-        inc : increment of line count after extraction
-        Returns
-        -------
-        group : extracted info
-        """
-        try:
-            group = re.match(pattern, self.text[self.line_count]).group(1)
-            self.line_count += inc
-        except AttributeError:
-            raise ValueError("File format error at line %d:%s" % (self.line_count, self.text[self.line_count]))
-        return group
-
-    def _get_type(self):
-        self.file_type = self._extract_pattern(r"File type = \"(.*)\"", 2)
-
-    def _get_time_intval(self):
-        self.xmin = self._extract_pattern(r"xmin = (.*)", 1)
-        self.xmax = self._extract_pattern(r"xmax = (.*)", 2)
-
-    def _get_size(self):
-        self.size = int(self._extract_pattern(r"size = (.*)", 2))
-
-    def _get_item_list(self):
-        """Only supports IntervalTier currently"""
-        for itemIdx in range(1, self.size + 1):
-            tier = OrderedDict()
-            item_list = []
-            tier_idx = self._extract_pattern(r"item \[(.*)\]:", 1)
-            tier_class = self._extract_pattern(r"class = \"(.*)\"", 1)
-            if tier_class != "IntervalTier":
-                raise NotImplementedError("Only IntervalTier class is supported currently")
-            tier_name = self._extract_pattern(r"name = \"(.*)\"", 1)
-            tier_xmin = self._extract_pattern(r"xmin = (.*)", 1)
-            tier_xmax = self._extract_pattern(r"xmax = (.*)", 1)
-            tier_size = self._extract_pattern(r"intervals: size = (.*)", 1)
-            for i in range(int(tier_size)):
-                item = OrderedDict()
-                item["idx"] = self._extract_pattern(r"intervals \[(.*)\]", 1)
-                item["xmin"] = self._extract_pattern(r"xmin = (.*)", 1)
-                item["xmax"] = self._extract_pattern(r"xmax = (.*)", 1)
-                item["text"] = self._extract_pattern(r"text = \"(.*)\"", 1)
-                item_list.append(item)
-            tier["idx"] = tier_idx
-            tier["class"] = tier_class
-            tier["name"] = tier_name
-            tier["xmin"] = tier_xmin
-            tier["xmax"] = tier_xmax
-            tier["size"] = tier_size
-            tier["items"] = item_list
-            self.tier_list.append(tier)
-
-    def toJson(self):
-        _json = OrderedDict()
-        _json["file_type"] = self.file_type
-        _json["xmin"] = self.xmin
-        _json["xmax"] = self.xmax
-        _json["size"] = self.size
-        _json["tiers"] = self.tier_list
-        return json.dumps(_json, ensure_ascii=False, indent=2)
-
-
-def get_mel2ph(tg_fn, ph, mel, hparams):
-    ph_list = ph.split(" ")
-    with open(tg_fn, "r") as f:
-        tg = f.readlines()
-    tg = remove_empty_lines(tg)
-    tg = TextGrid(tg)
-    tg = json.loads(tg.toJson())
-    split = np.ones(len(ph_list) + 1, np.float) * -1
-    tg_idx = 0
-    ph_idx = 0
-    tg_align = [x for x in tg['tiers'][-1]['items']]
-    tg_align_ = []
-    for x in tg_align:
-        x['xmin'] = float(x['xmin'])
-        x['xmax'] = float(x['xmax'])
-        if x['text'] in ['sil', 'sp', '', 'SIL', 'PUNC']:
-            x['text'] = ''
-            if len(tg_align_) > 0 and tg_align_[-1]['text'] == '':
-                tg_align_[-1]['xmax'] = x['xmax']
-                continue
-        tg_align_.append(x)
-    tg_align = tg_align_
-    tg_len = len([x for x in tg_align if x['text'] != ''])
-    ph_len = len([x for x in ph_list if not is_sil_phoneme(x)])
-    assert tg_len == ph_len, (tg_len, ph_len, tg_align, ph_list, tg_fn)
-    while tg_idx < len(tg_align) or ph_idx < len(ph_list):
-        if tg_idx == len(tg_align) and is_sil_phoneme(ph_list[ph_idx]):
-            split[ph_idx] = 1e8
-            ph_idx += 1
-            continue
-        x = tg_align[tg_idx]
-        if x['text'] == '' and ph_idx == len(ph_list):
-            tg_idx += 1
-            continue
-        assert ph_idx < len(ph_list), (tg_len, ph_len, tg_align, ph_list, tg_fn)
-        ph = ph_list[ph_idx]
-        if x['text'] == '' and not is_sil_phoneme(ph):
-            assert False, (ph_list, tg_align)
-        if x['text'] != '' and is_sil_phoneme(ph):
-            ph_idx += 1
-        else:
-            assert (x['text'] == '' and is_sil_phoneme(ph)) \
-                   or x['text'].lower() == ph.lower() \
-                   or x['text'].lower() == 'sil', (x['text'], ph)
-            split[ph_idx] = x['xmin']
-            if ph_idx > 0 and split[ph_idx - 1] == -1 and is_sil_phoneme(ph_list[ph_idx - 1]):
-                split[ph_idx - 1] = split[ph_idx]
-            ph_idx += 1
-            tg_idx += 1
-    assert tg_idx == len(tg_align), (tg_idx, [x['text'] for x in tg_align])
-    assert ph_idx >= len(ph_list) - 1, (ph_idx, ph_list, len(ph_list), [x['text'] for x in tg_align], tg_fn)
-    mel2ph = np.zeros([mel.shape[0]], np.int)
-    split[0] = 0
-    split[-1] = 1e8
-    for i in range(len(split) - 1):
-        assert split[i] != -1 and split[i] <= split[i + 1], (split[:-1],)
-    split = [int(s * hparams['audio_sample_rate'] / hparams['hop_size'] + 0.5) for s in split]
-    for ph_idx in range(len(ph_list)):
-        mel2ph[split[ph_idx]:split[ph_idx + 1]] = ph_idx + 1
-    mel2ph_torch = torch.from_numpy(mel2ph)
-    T_t = len(ph_list)
-    dur = mel2ph_torch.new_zeros([T_t + 1]).scatter_add(0, mel2ph_torch, torch.ones_like(mel2ph_torch))
-    dur = dur[1:].numpy()
-    return mel2ph, dur
-
-
-def build_phone_encoder(data_dir):
-    phone_list_file = os.path.join(data_dir, 'phone_set.json')
-    phone_list = json.load(open(phone_list_file))
-    return TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
-
-
-def build_word_encoder(data_dir):
-    word_list_file = os.path.join(data_dir, 'word_set.json')
-    word_list = json.load(open(word_list_file))
-    return TokenTextEncoder(None, vocab_list=word_list, replace_oov=',')
-
-def is_sil_phoneme(p):
-    return not p[0].isalpha()
-
-
-def build_token_encoder(token_list_file):
-    token_list = json.load(open(token_list_file))
-    return TokenTextEncoder(None, vocab_list=token_list, replace_oov='<UNK>')
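
One step in the deleted get_mel2ph above is worth a gloss: phoneme durations are counted with scatter_add, using index 0 as a dummy slot for frames not assigned to any phoneme. A minimal sketch with made-up mel2ph values:

import torch

mel2ph = torch.tensor([1, 1, 1, 2, 2, 3, 3, 3, 3])  # mel frame -> 1-based phoneme id
T_t = 3                                              # number of phonemes
dur = mel2ph.new_zeros(T_t + 1).scatter_add(0, mel2ph, torch.ones_like(mel2ph))
print(dur[1:].tolist())  # [3, 2, 4]: frames per phoneme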
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/emotion/params_model.py
DELETED
@@ -1,11 +0,0 @@
-
-## Model parameters
-model_hidden_size = 256
-model_embedding_size = 256
-model_num_layers = 3
-
-
-## Training parameters
-learning_rate_init = 1e-4
-speakers_per_batch = 6
-utterances_per_speaker = 20
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/custom_dataset/.ipynb_checkpoints/yolov6_s_fast-checkpoint.py
DELETED
@@ -1,124 +0,0 @@
-_base_ = '../yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py'
-
-max_epochs = 100  # maximum number of training epochs
-data_root = './data-df2/'  # absolute path to the dataset directory
-
-# Path where results are saved; optional. If omitted, outputs go to a folder under work_dirs named after the config.
-# If a config only changes a few parameters, editing this variable saves the new training files elsewhere.
-work_dir = './work_dirs/yolov6_s_df2'
-
-# Adjust the batch size to your GPUs; the YOLOv5-s default is 8 GPUs x 16 batch size.
-train_batch_size_per_gpu = 32
-train_num_workers = 4  # recommended: train_num_workers = nGPU x 4
-
-save_epoch_intervals = 2  # save the weights once every `interval` epochs
-
-# Adjust base_lr to your GPUs; scale it as base_lr_default * (your_bs / default_bs).
-base_lr = _base_.base_lr / 4
-
-class_name = ('short_sleeved_shirt',
-              'long_sleeved_shirt',
-              'short_sleeved_outwear',
-              'long_sleeved_outwear',
-              'vest',
-              'sling',
-              'shorts',
-              'trousers',
-              'skirt',
-              'short_sleeved_dress',
-              'long_sleeved_dress',
-              'vest_dress',
-              'sling_dress')  # set class_name according to the class info in class_with_id.txt
-
-num_classes = len(class_name)
-metainfo = dict(
-    classes=class_name,
-    palette=[(255, 0, 0),
-             (255, 128, 0),
-             (255, 255, 0),
-             (128, 255, 0),
-             (0, 255, 0),
-             (0, 255, 128),
-             (0, 255, 255),
-             (0, 128, 255),
-             (0, 0, 255),
-             (127, 0, 255),
-             (255, 0, 255),
-             (255, 0, 127),
-             (128, 128, 128)]  # colors used for plotting; any values work
-)
-
-train_cfg = dict(
-    max_epochs=max_epochs,
-    val_begin=20,  # epoch after which validation starts; set to 20 because accuracy is low in the first 20 epochs, so evaluating them adds little
-    val_interval=save_epoch_intervals,  # run a test evaluation every val_interval epochs
-    dynamic_intervals=[(max_epochs-_base_.num_last_epochs, 1)]
-)
-
-model = dict(
-    bbox_head=dict(
-        head_module=dict(num_classes=num_classes)),
-    train_cfg=dict(
-        initial_assigner=dict(num_classes=num_classes),
-        assigner=dict(num_classes=num_classes)
-    )
-)
-
-train_dataloader = dict(
-    batch_size=train_batch_size_per_gpu,
-    num_workers=train_num_workers,
-    dataset=dict(
-        _delete_=True,
-        type='RepeatDataset',
-        # If the dataset is small, RepeatDataset can repeat it n times per epoch (e.g. a value of 5 means 5 repeats)
-        times=2,
-        dataset=dict(
-            type=_base_.dataset_type,
-            data_root=data_root,
-            metainfo=metainfo,
-            ann_file='annotations/trainval.json',
-            data_prefix=dict(img='smaller-dataset/'),
-            filter_cfg=dict(filter_empty_gt=False, min_size=32),
-            pipeline=_base_.train_pipeline)))
-
-val_dataloader = dict(
-    dataset=dict(
-        metainfo=metainfo,
-        data_root=data_root,
-        ann_file='annotations/trainval.json',
-        data_prefix=dict(img='smaller-dataset/')))
-
-test_dataloader = val_dataloader
-
-val_evaluator = dict(ann_file=data_root + 'annotations/trainval.json')
-test_evaluator = val_evaluator
-
-optim_wrapper = dict(optimizer=dict(lr=base_lr))
-
-default_hooks = dict(
-    # epochs between checkpoint saves and how many checkpoints to keep at most; `save_best` additionally saves the best model (recommended)
-    checkpoint=dict(
-        type='CheckpointHook',
-        interval=save_epoch_intervals,
-        max_keep_ckpts=5,
-        save_best='auto'),
-    param_scheduler=dict(max_epochs=max_epochs),
-    # logger output interval
-    logger=dict(type='LoggerHook', interval=10))
-
-custom_hooks = [
-    dict(
-        type="EMAHook",
-        ema_type="ExpMomentumEMA",
-        momentum=0.0001,
-        update_buffers=True,
-        strict_load=False,
-        priority=49),
-    dict(
-        type="mmdet.PipelineSwitchHook",
-        switch_epoch=max_epochs-max_epochs-_base_.num_last_epochs,
-        switch_pipeline=_base_.train_pipeline_stage2
-    )
-]
-
-visualizer = dict(vis_backends=[dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])
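
One apparent slip in the deleted config: the PipelineSwitchHook's switch_epoch is computed as max_epochs-max_epochs-_base_.num_last_epochs, which collapses to a negative number, so the pipeline switch would never fire at the intended epoch. The likely intent is max_epochs - _base_.num_last_epochs (an inference, not confirmed by the source); a quick check with an assumed value:

max_epochs = 100
num_last_epochs = 15  # assumed stand-in for _base_.num_last_epochs, for illustration only
print(max_epochs - max_epochs - num_last_epochs)  # -15: what the config computes
print(max_epochs - num_last_epochs)               # 85: the probable intent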
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Better.py
DELETED
@@ -1,57 +0,0 @@
-import os
-import json
-import requests
-from typing import Dict, get_type_hints
-
-url = 'https://openai-proxy-api.vercel.app/v1/'
-model = [
-    'gpt-3.5-turbo',
-    'gpt-3.5-turbo-0613',
-    'gpt-3.5-turbo-16k',
-    'gpt-3.5-turbo-16k-0613',
-    'gpt-4',
-]
-
-supports_stream = True
-needs_auth = False
-
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    headers = {
-        'Content-Type': 'application/json',
-        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.58',
-        'Referer': 'https://chat.ylokh.xyz/',
-        'Origin': 'https://chat.ylokh.xyz',
-        'Connection': 'keep-alive',
-    }
-
-    json_data = {
-        'messages': messages,
-        'temperature': 1.0,
-        'model': model,
-        'stream': stream,
-    }
-
-    response = requests.post(
-        'https://openai-proxy-api.vercel.app/v1/chat/completions', headers=headers, json=json_data, stream=True
-    )
-
-    for token in response.iter_lines():
-        decoded = token.decode('utf-8')
-        if decoded.startswith('data: '):
-            data_str = decoded.replace('data: ', '')
-            data = json.loads(data_str)
-            if 'choices' in data and 'delta' in data['choices'][0]:
-                delta = data['choices'][0]['delta']
-                content = delta.get('content', '')
-                finish_reason = delta.get('finish_reason', '')
-
-                if finish_reason == 'stop':
-                    break
-                if content:
-                    yield content
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + '(%s)' % ', '.join(
-    [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
-
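
For context, the deleted _create_completion above is a generator, so a caller streams tokens as they arrive. A hedged usage sketch (the proxy endpoint may no longer exist; the messages payload follows the usual OpenAI chat shape):

if __name__ == "__main__":
    messages = [{"role": "user", "content": "Say hello."}]
    for chunk in _create_completion(model="gpt-3.5-turbo",
                                    messages=messages, stream=True):
        print(chunk, end="", flush=True)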
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/live2d/live2dcubismcore.min.js
DELETED
The diff for this file is too large to render.
See raw diff
spaces/AlexWang/lama/saicinpainting/evaluation/losses/lpips.py
DELETED
@@ -1,891 +0,0 @@
-############################################################
-# The contents below have been combined using files in the #
-# following repository:                                    #
-# https://github.com/richzhang/PerceptualSimilarity        #
-############################################################
-
-############################################################
-# __init__.py                                              #
-############################################################
-
-import numpy as np
-from skimage.metrics import structural_similarity
-import torch
-
-from saicinpainting.utils import get_shape
-
-
-class PerceptualLoss(torch.nn.Module):
-    def __init__(self, model='net-lin', net='alex', colorspace='rgb', model_path=None, spatial=False, use_gpu=True):
-        # VGG using our perceptually-learned weights (LPIPS metric)
-        # def __init__(self, model='net', net='vgg', use_gpu=True):  # "default" way of using VGG as a perceptual loss
-        super(PerceptualLoss, self).__init__()
-        self.use_gpu = use_gpu
-        self.spatial = spatial
-        self.model = DistModel()
-        self.model.initialize(model=model, net=net, use_gpu=use_gpu, colorspace=colorspace,
-                              model_path=model_path, spatial=self.spatial)
-
-    def forward(self, pred, target, normalize=True):
-        """
-        Pred and target are Variables.
-        If normalize is True, assumes the images are between [0,1] and then scales them between [-1,+1]
-        If normalize is False, assumes the images are already between [-1,+1]
-        Inputs pred and target are Nx3xHxW
-        Output pytorch Variable N long
-        """
-
-        if normalize:
-            target = 2 * target - 1
-            pred = 2 * pred - 1
-
-        return self.model(target, pred)
-
-
-def normalize_tensor(in_feat, eps=1e-10):
-    norm_factor = torch.sqrt(torch.sum(in_feat ** 2, dim=1, keepdim=True))
-    return in_feat / (norm_factor + eps)
-
-
-def l2(p0, p1, range=255.):
-    return .5 * np.mean((p0 / range - p1 / range) ** 2)
-
-
-def psnr(p0, p1, peak=255.):
-    return 10 * np.log10(peak ** 2 / np.mean((1. * p0 - 1. * p1) ** 2))
-
-
-def dssim(p0, p1, range=255.):
-    return (1 - structural_similarity(p0, p1, data_range=range, multichannel=True)) / 2.
-
-
-def rgb2lab(in_img, mean_cent=False):
-    from skimage import color
-    img_lab = color.rgb2lab(in_img)
-    if (mean_cent):
-        img_lab[:, :, 0] = img_lab[:, :, 0] - 50
-    return img_lab
-
-
-def tensor2np(tensor_obj):
-    # change dimension of a tensor object into a numpy array
-    return tensor_obj[0].cpu().float().numpy().transpose((1, 2, 0))
-
-
-def np2tensor(np_obj):
-    # change dimension of np array into tensor array
-    return torch.Tensor(np_obj[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-
-def tensor2tensorlab(image_tensor, to_norm=True, mc_only=False):
-    # image tensor to lab tensor
-    from skimage import color
-
-    img = tensor2im(image_tensor)
-    img_lab = color.rgb2lab(img)
-    if (mc_only):
-        img_lab[:, :, 0] = img_lab[:, :, 0] - 50
-    if (to_norm and not mc_only):
-        img_lab[:, :, 0] = img_lab[:, :, 0] - 50
-        img_lab = img_lab / 100.
-
-    return np2tensor(img_lab)
-
-
-def tensorlab2tensor(lab_tensor, return_inbnd=False):
-    from skimage import color
-    import warnings
-    warnings.filterwarnings("ignore")
-
-    lab = tensor2np(lab_tensor) * 100.
-    lab[:, :, 0] = lab[:, :, 0] + 50
-
-    rgb_back = 255. * np.clip(color.lab2rgb(lab.astype('float')), 0, 1)
-    if (return_inbnd):
-        # convert back to lab, see if we match
-        lab_back = color.rgb2lab(rgb_back.astype('uint8'))
-        mask = 1. * np.isclose(lab_back, lab, atol=2.)
-        mask = np2tensor(np.prod(mask, axis=2)[:, :, np.newaxis])
-        return (im2tensor(rgb_back), mask)
-    else:
-        return im2tensor(rgb_back)
-
-
-def rgb2lab(input):
-    from skimage import color
-    return color.rgb2lab(input / 255.)
-
-
-def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
-    image_numpy = image_tensor[0].cpu().float().numpy()
-    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
-    return image_numpy.astype(imtype)
-
-
-def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
-    return torch.Tensor((image / factor - cent)
-                        [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-
-def tensor2vec(vector_tensor):
-    return vector_tensor.data.cpu().numpy()[:, :, 0, 0]
-
-
-def voc_ap(rec, prec, use_07_metric=False):
-    """ ap = voc_ap(rec, prec, [use_07_metric])
-    Compute VOC AP given precision and recall.
-    If use_07_metric is true, uses the
-    VOC 07 11 point method (default:False).
-    """
-    if use_07_metric:
-        # 11 point metric
-        ap = 0.
-        for t in np.arange(0., 1.1, 0.1):
-            if np.sum(rec >= t) == 0:
-                p = 0
-            else:
-                p = np.max(prec[rec >= t])
-            ap = ap + p / 11.
-    else:
-        # correct AP calculation
-        # first append sentinel values at the end
-        mrec = np.concatenate(([0.], rec, [1.]))
-        mpre = np.concatenate(([0.], prec, [0.]))
-
-        # compute the precision envelope
-        for i in range(mpre.size - 1, 0, -1):
-            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
-
-        # to calculate area under PR curve, look for points
-        # where X axis (recall) changes value
-        i = np.where(mrec[1:] != mrec[:-1])[0]
-
-        # and sum (\Delta recall) * prec
-        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
-    return ap
-
-
-def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=255. / 2.):
-    # def tensor2im(image_tensor, imtype=np.uint8, cent=1., factor=1.):
-    image_numpy = image_tensor[0].cpu().float().numpy()
-    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + cent) * factor
-    return image_numpy.astype(imtype)
-
-
-def im2tensor(image, imtype=np.uint8, cent=1., factor=255. / 2.):
-    # def im2tensor(image, imtype=np.uint8, cent=1., factor=1.):
-    return torch.Tensor((image / factor - cent)
-                        [:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
-
-
-############################################################
-# base_model.py                                            #
-############################################################
-
-
-class BaseModel(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-
-    def name(self):
-        return 'BaseModel'
-
-    def initialize(self, use_gpu=True):
-        self.use_gpu = use_gpu
-
-    def forward(self):
-        pass
-
-    def get_image_paths(self):
-        pass
-
-    def optimize_parameters(self):
-        pass
-
-    def get_current_visuals(self):
-        return self.input
-
-    def get_current_errors(self):
-        return {}
-
-    def save(self, label):
-        pass
-
-    # helper saving function that can be used by subclasses
-    def save_network(self, network, path, network_label, epoch_label):
-        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
-        save_path = os.path.join(path, save_filename)
-        torch.save(network.state_dict(), save_path)
-
-    # helper loading function that can be used by subclasses
-    def load_network(self, network, network_label, epoch_label):
-        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
-        save_path = os.path.join(self.save_dir, save_filename)
-        print('Loading network from %s' % save_path)
-        network.load_state_dict(torch.load(save_path, map_location='cpu'))
-
-    def update_learning_rate():
-        pass
-
-    def get_image_paths(self):
-        return self.image_paths
-
-    def save_done(self, flag=False):
-        np.save(os.path.join(self.save_dir, 'done_flag'), flag)
-        np.savetxt(os.path.join(self.save_dir, 'done_flag'), [flag, ], fmt='%i')
-
-
-############################################################
-# dist_model.py                                            #
-############################################################
-
-import os
-from collections import OrderedDict
-from scipy.ndimage import zoom
-from tqdm import tqdm
-
-
-class DistModel(BaseModel):
-    def name(self):
-        return self.model_name
-
-    def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False,
-                   model_path=None,
-                   use_gpu=True, printNet=False, spatial=False,
-                   is_train=False, lr=.0001, beta1=0.5, version='0.1'):
-        '''
-        INPUTS
-            model - ['net-lin'] for linearly calibrated network
-                    ['net'] for off-the-shelf network
-                    ['L2'] for L2 distance in Lab colorspace
-                    ['SSIM'] for ssim in RGB colorspace
-            net - ['squeeze','alex','vgg']
-            model_path - if None, will look in weights/[NET_NAME].pth
-            colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
-            use_gpu - bool - whether or not to use a GPU
-            printNet - bool - whether or not to print network architecture out
-            spatial - bool - whether to output an array containing varying distances across spatial dimensions
-            spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below).
-            spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images.
-            spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear).
-            is_train - bool - [True] for training mode
-            lr - float - initial learning rate
-            beta1 - float - initial momentum term for adam
-            version - 0.1 for latest, 0.0 was original (with a bug)
-        '''
-        BaseModel.initialize(self, use_gpu=use_gpu)
-
-        self.model = model
-        self.net = net
-        self.is_train = is_train
-        self.spatial = spatial
-        self.model_name = '%s [%s]' % (model, net)
-
-        if (self.model == 'net-lin'):  # pretrained net + linear layer
-            self.net = PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
-                               use_dropout=True, spatial=spatial, version=version, lpips=True)
-            kw = dict(map_location='cpu')
-            if (model_path is None):
-                import inspect
-                model_path = os.path.abspath(
-                    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'models', 'lpips_models', f'{net}.pth'))
-
-            if (not is_train):
-                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
-
-        elif (self.model == 'net'):  # pretrained network
-            self.net = PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
-        elif (self.model in ['L2', 'l2']):
-            self.net = L2(use_gpu=use_gpu, colorspace=colorspace)  # not really a network, only for testing
-            self.model_name = 'L2'
-        elif (self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']):
-            self.net = DSSIM(use_gpu=use_gpu, colorspace=colorspace)
-            self.model_name = 'SSIM'
-        else:
-            raise ValueError("Model [%s] not recognized." % self.model)
-
-        self.trainable_parameters = list(self.net.parameters())
-
-        if self.is_train:  # training mode
-            # extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
-            self.rankLoss = BCERankingLoss()
-            self.trainable_parameters += list(self.rankLoss.net.parameters())
-            self.lr = lr
-            self.old_lr = lr
-            self.optimizer_net = torch.optim.Adam(self.trainable_parameters, lr=lr, betas=(beta1, 0.999))
-        else:  # test mode
-            self.net.eval()
-
-        # if (use_gpu):
-        #     self.net.to(gpu_ids[0])
-        #     self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
-        #     if (self.is_train):
-        #         self.rankLoss = self.rankLoss.to(device=gpu_ids[0])  # just put this on GPU0
-
-        if (printNet):
-            print('---------- Networks initialized -------------')
-            print_network(self.net)
-            print('-----------------------------------------------')
-
-    def forward(self, in0, in1, retPerLayer=False):
-        ''' Function computes the distance between image patches in0 and in1
-        INPUTS
-            in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
-        OUTPUT
-            computed distances between in0 and in1
-        '''
-
-        return self.net(in0, in1, retPerLayer=retPerLayer)
-
-    # ***** TRAINING FUNCTIONS *****
-    def optimize_parameters(self):
-        self.forward_train()
-        self.optimizer_net.zero_grad()
-        self.backward_train()
-        self.optimizer_net.step()
-        self.clamp_weights()
-
-    def clamp_weights(self):
-        for module in self.net.modules():
-            if (hasattr(module, 'weight') and module.kernel_size == (1, 1)):
-                module.weight.data = torch.clamp(module.weight.data, min=0)
-
-    def set_input(self, data):
-        self.input_ref = data['ref']
-        self.input_p0 = data['p0']
-        self.input_p1 = data['p1']
-        self.input_judge = data['judge']
-
-        # if (self.use_gpu):
-        #     self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
-        #     self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
-        #     self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
-        #     self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
-
-        # self.var_ref = Variable(self.input_ref, requires_grad=True)
-        # self.var_p0 = Variable(self.input_p0, requires_grad=True)
-        # self.var_p1 = Variable(self.input_p1, requires_grad=True)
-
-    def forward_train(self):  # run forward pass
-        # print(self.net.module.scaling_layer.shift)
-        # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item())
-
-        assert False, "We shoud've not get here when using LPIPS as a metric"
-
-        self.d0 = self(self.var_ref, self.var_p0)
-        self.d1 = self(self.var_ref, self.var_p1)
-        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)
-
-        self.var_judge = Variable(1. * self.input_judge).view(self.d0.size())
-
-        self.loss_total = self.rankLoss(self.d0, self.d1, self.var_judge * 2. - 1.)
-
-        return self.loss_total
-
-    def backward_train(self):
-        torch.mean(self.loss_total).backward()
-
-    def compute_accuracy(self, d0, d1, judge):
-        ''' d0, d1 are Variables, judge is a Tensor '''
-        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
-        judge_per = judge.cpu().numpy().flatten()
-        return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)
-
-    def get_current_errors(self):
-        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
-                               ('acc_r', self.acc_r)])
-
-        for key in retDict.keys():
-            retDict[key] = np.mean(retDict[key])
-
-        return retDict
-
-    def get_current_visuals(self):
-        zoom_factor = 256 / self.var_ref.data.size()[2]
-
-        ref_img = tensor2im(self.var_ref.data)
-        p0_img = tensor2im(self.var_p0.data)
-        p1_img = tensor2im(self.var_p1.data)
-
-        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
-        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
-        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)
-
-        return OrderedDict([('ref', ref_img_vis),
-                            ('p0', p0_img_vis),
-                            ('p1', p1_img_vis)])
-
-    def save(self, path, label):
-        if (self.use_gpu):
-            self.save_network(self.net.module, path, '', label)
-        else:
-            self.save_network(self.net, path, '', label)
-        self.save_network(self.rankLoss.net, path, 'rank', label)
-
-    def update_learning_rate(self, nepoch_decay):
-        lrd = self.lr / nepoch_decay
-        lr = self.old_lr - lrd
-
-        for param_group in self.optimizer_net.param_groups:
-            param_group['lr'] = lr
-
-        print('update lr [%s] decay: %f -> %f' % (type, self.old_lr, lr))
-        self.old_lr = lr
-
-
-def score_2afc_dataset(data_loader, func, name=''):
-    ''' Function computes Two Alternative Forced Choice (2AFC) score using
-        distance function 'func' in dataset 'data_loader'
-    INPUTS
-        data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
-        func - callable distance function - calling d=func(in0,in1) should take 2
-            pytorch tensors with shape Nx3xXxY, and return numpy array of length N
-    OUTPUTS
-        [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
-        [1] - dictionary with following elements
-            d0s,d1s - N arrays containing distances between reference patch to perturbed patches
-            gts - N array in [0,1], preferred patch selected by human evaluators
-            (closer to "0" for left patch p0, "1" for right patch p1,
449 |
-
"0.6" means 60pct people preferred right patch, 40pct preferred left)
|
450 |
-
scores - N array in [0,1], corresponding to what percentage function agreed with humans
|
451 |
-
CONSTS
|
452 |
-
N - number of test triplets in data_loader
|
453 |
-
'''
|
454 |
-
|
455 |
-
d0s = []
|
456 |
-
d1s = []
|
457 |
-
gts = []
|
458 |
-
|
459 |
-
for data in tqdm(data_loader.load_data(), desc=name):
|
460 |
-
d0s += func(data['ref'], data['p0']).data.cpu().numpy().flatten().tolist()
|
461 |
-
d1s += func(data['ref'], data['p1']).data.cpu().numpy().flatten().tolist()
|
462 |
-
gts += data['judge'].cpu().numpy().flatten().tolist()
|
463 |
-
|
464 |
-
d0s = np.array(d0s)
|
465 |
-
d1s = np.array(d1s)
|
466 |
-
gts = np.array(gts)
|
467 |
-
scores = (d0s < d1s) * (1. - gts) + (d1s < d0s) * gts + (d1s == d0s) * .5
|
468 |
-
|
469 |
-
return (np.mean(scores), dict(d0s=d0s, d1s=d1s, gts=gts, scores=scores))
|
470 |
-
|
471 |
-
|
472 |
-
def score_jnd_dataset(data_loader, func, name=''):
|
473 |
-
''' Function computes JND score using distance function 'func' in dataset 'data_loader'
|
474 |
-
INPUTS
|
475 |
-
data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
|
476 |
-
func - callable distance function - calling d=func(in0,in1) should take 2
|
477 |
-
pytorch tensors with shape Nx3xXxY, and return pytorch array of length N
|
478 |
-
OUTPUTS
|
479 |
-
[0] - JND score in [0,1], mAP score (area under precision-recall curve)
|
480 |
-
[1] - dictionary with following elements
|
481 |
-
ds - N array containing distances between two patches shown to human evaluator
|
482 |
-
sames - N array containing fraction of people who thought the two patches were identical
|
483 |
-
CONSTS
|
484 |
-
N - number of test triplets in data_loader
|
485 |
-
'''
|
486 |
-
|
487 |
-
ds = []
|
488 |
-
gts = []
|
489 |
-
|
490 |
-
for data in tqdm(data_loader.load_data(), desc=name):
|
491 |
-
ds += func(data['p0'], data['p1']).data.cpu().numpy().tolist()
|
492 |
-
gts += data['same'].cpu().numpy().flatten().tolist()
|
493 |
-
|
494 |
-
sames = np.array(gts)
|
495 |
-
ds = np.array(ds)
|
496 |
-
|
497 |
-
sorted_inds = np.argsort(ds)
|
498 |
-
ds_sorted = ds[sorted_inds]
|
499 |
-
sames_sorted = sames[sorted_inds]
|
500 |
-
|
501 |
-
TPs = np.cumsum(sames_sorted)
|
502 |
-
FPs = np.cumsum(1 - sames_sorted)
|
503 |
-
FNs = np.sum(sames_sorted) - TPs
|
504 |
-
|
505 |
-
precs = TPs / (TPs + FPs)
|
506 |
-
recs = TPs / (TPs + FNs)
|
507 |
-
score = voc_ap(recs, precs)
|
508 |
-
|
509 |
-
return (score, dict(ds=ds, sames=sames))
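The two scoring helpers above are the evaluation entry points of this module. A minimal sketch of driving `score_2afc_dataset`, assuming a loaded perceptual model `model` (callable as `model(in0, in1)`) and a 2AFC `data_loader` from the original LPIPS evaluation setup; both names are illustrative and not defined in this file:

```python
# Hypothetical usage sketch: `model` and `data_loader` are assumed to exist.
score, results = score_2afc_dataset(
    data_loader,
    func=lambda in0, in1: model(in0, in1),  # one distance tensor per patch pair
    name='2afc-val')

print('2AFC score: %.3f' % score)  # fraction of triplets where the metric agrees with humans
print(results['d0s'][:5], results['gts'][:5])
```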
```python
############################################################
# networks_basic.py #
############################################################

import torch.nn as nn
from torch.autograd import Variable
import numpy as np


def spatial_average(in_tens, keepdim=True):
    return in_tens.mean([2, 3], keepdim=keepdim)


def upsample(in_tens, out_H=64):  # assumes scale factor is same for H and W
    in_H = in_tens.shape[2]
    scale_factor = 1. * out_H / in_H

    return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens)


# Learned perceptual metric
class PNetLin(nn.Module):
    def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False,
                 version='0.1', lpips=True):
        super(PNetLin, self).__init__()

        self.pnet_type = pnet_type
        self.pnet_tune = pnet_tune
        self.pnet_rand = pnet_rand
        self.spatial = spatial
        self.lpips = lpips
        self.version = version
        self.scaling_layer = ScalingLayer()

        if (self.pnet_type in ['vgg', 'vgg16']):
            net_type = vgg16
            self.chns = [64, 128, 256, 512, 512]
        elif (self.pnet_type == 'alex'):
            net_type = alexnet
            self.chns = [64, 192, 384, 256, 256]
        elif (self.pnet_type == 'squeeze'):
            net_type = squeezenet
            self.chns = [64, 128, 256, 384, 384, 512, 512]
        self.L = len(self.chns)

        self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune)

        if (lpips):
            self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
            self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
            self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
            self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
            self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
            self.lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
            if (self.pnet_type == 'squeeze'):  # 7 layers for squeezenet
                self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout)
                self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout)
                self.lins += [self.lin5, self.lin6]

    def forward(self, in0, in1, retPerLayer=False):
        # v0.0 - original release had a bug, where input was not scaled
        in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version == '0.1' else (
            in0, in1)
        outs0, outs1 = self.net(in0_input), self.net(in1_input)
        feats0, feats1, diffs = {}, {}, {}

        for kk in range(self.L):
            feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
            diffs[kk] = (feats0[kk] - feats1[kk]) ** 2

        if (self.lpips):
            if (self.spatial):
                res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)]
            else:
                res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)]
        else:
            if (self.spatial):
                res = [upsample(diffs[kk].sum(dim=1, keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)]
            else:
                res = [spatial_average(diffs[kk].sum(dim=1, keepdim=True), keepdim=True) for kk in range(self.L)]

        val = res[0]
        for l in range(1, self.L):
            val += res[l]

        if (retPerLayer):
            return (val, res)
        else:
            return val


class ScalingLayer(nn.Module):
    def __init__(self):
        super(ScalingLayer, self).__init__()
        self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
        self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])

    def forward(self, inp):
        return (inp - self.shift) / self.scale


class NetLinLayer(nn.Module):
    ''' A single linear layer which does a 1x1 conv '''

    def __init__(self, chn_in, chn_out=1, use_dropout=False):
        super(NetLinLayer, self).__init__()

        layers = [nn.Dropout(), ] if (use_dropout) else []
        layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
        self.model = nn.Sequential(*layers)


class Dist2LogitLayer(nn.Module):
    ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) '''

    def __init__(self, chn_mid=32, use_sigmoid=True):
        super(Dist2LogitLayer, self).__init__()

        layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True), ]
        layers += [nn.LeakyReLU(0.2, True), ]
        layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True), ]
        layers += [nn.LeakyReLU(0.2, True), ]
        layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True), ]
        if (use_sigmoid):
            layers += [nn.Sigmoid(), ]
        self.model = nn.Sequential(*layers)

    def forward(self, d0, d1, eps=0.1):
        return self.model(torch.cat((d0, d1, d0 - d1, d0 / (d1 + eps), d1 / (d0 + eps)), dim=1))


class BCERankingLoss(nn.Module):
    def __init__(self, chn_mid=32):
        super(BCERankingLoss, self).__init__()
        self.net = Dist2LogitLayer(chn_mid=chn_mid)
        # self.parameters = list(self.net.parameters())
        self.loss = torch.nn.BCELoss()

    def forward(self, d0, d1, judge):
        per = (judge + 1.) / 2.
        self.logit = self.net(d0, d1)
        return self.loss(self.logit, per)


# L2, DSSIM metrics
class FakeNet(nn.Module):
    def __init__(self, use_gpu=True, colorspace='Lab'):
        super(FakeNet, self).__init__()
        self.use_gpu = use_gpu
        self.colorspace = colorspace


class L2(FakeNet):

    def forward(self, in0, in1, retPerLayer=None):
        assert (in0.size()[0] == 1)  # currently only supports batchSize 1

        if (self.colorspace == 'RGB'):
            (N, C, X, Y) = in0.size()
            value = torch.mean(torch.mean(torch.mean((in0 - in1) ** 2, dim=1).view(N, 1, X, Y), dim=2).view(N, 1, 1, Y),
                               dim=3).view(N)
            return value
        elif (self.colorspace == 'Lab'):
            value = l2(tensor2np(tensor2tensorlab(in0.data, to_norm=False)),
                       tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float')
            ret_var = Variable(torch.Tensor((value,)))
            # if (self.use_gpu):
            #     ret_var = ret_var.cuda()
            return ret_var


class DSSIM(FakeNet):

    def forward(self, in0, in1, retPerLayer=None):
        assert (in0.size()[0] == 1)  # currently only supports batchSize 1

        if (self.colorspace == 'RGB'):
            value = dssim(1. * tensor2im(in0.data), 1. * tensor2im(in1.data), range=255.).astype('float')
        elif (self.colorspace == 'Lab'):
            value = dssim(tensor2np(tensor2tensorlab(in0.data, to_norm=False)),
                          tensor2np(tensor2tensorlab(in1.data, to_norm=False)), range=100.).astype('float')
        ret_var = Variable(torch.Tensor((value,)))
        # if (self.use_gpu):
        #     ret_var = ret_var.cuda()
        return ret_var


def print_network(net):
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print('Network', net)
    print('Total number of parameters: %d' % num_params)


############################################################
# pretrained_networks.py #
############################################################

from collections import namedtuple
import torch
from torchvision import models as tv


class squeezenet(torch.nn.Module):
    def __init__(self, requires_grad=False, pretrained=True):
        super(squeezenet, self).__init__()
        pretrained_features = tv.squeezenet1_1(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.slice6 = torch.nn.Sequential()
        self.slice7 = torch.nn.Sequential()
        self.N_slices = 7
        for x in range(2):
            self.slice1.add_module(str(x), pretrained_features[x])
        for x in range(2, 5):
            self.slice2.add_module(str(x), pretrained_features[x])
        for x in range(5, 8):
            self.slice3.add_module(str(x), pretrained_features[x])
        for x in range(8, 10):
            self.slice4.add_module(str(x), pretrained_features[x])
        for x in range(10, 11):
            self.slice5.add_module(str(x), pretrained_features[x])
        for x in range(11, 12):
            self.slice6.add_module(str(x), pretrained_features[x])
        for x in range(12, 13):
            self.slice7.add_module(str(x), pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h = self.slice1(X)
        h_relu1 = h
        h = self.slice2(h)
        h_relu2 = h
        h = self.slice3(h)
        h_relu3 = h
        h = self.slice4(h)
        h_relu4 = h
        h = self.slice5(h)
        h_relu5 = h
        h = self.slice6(h)
        h_relu6 = h
        h = self.slice7(h)
        h_relu7 = h
        vgg_outputs = namedtuple("SqueezeOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5', 'relu6', 'relu7'])
        out = vgg_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5, h_relu6, h_relu7)

        return out


class alexnet(torch.nn.Module):
    def __init__(self, requires_grad=False, pretrained=True):
        super(alexnet, self).__init__()
        alexnet_pretrained_features = tv.alexnet(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.N_slices = 5
        for x in range(2):
            self.slice1.add_module(str(x), alexnet_pretrained_features[x])
        for x in range(2, 5):
            self.slice2.add_module(str(x), alexnet_pretrained_features[x])
        for x in range(5, 8):
            self.slice3.add_module(str(x), alexnet_pretrained_features[x])
        for x in range(8, 10):
            self.slice4.add_module(str(x), alexnet_pretrained_features[x])
        for x in range(10, 12):
            self.slice5.add_module(str(x), alexnet_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h = self.slice1(X)
        h_relu1 = h
        h = self.slice2(h)
        h_relu2 = h
        h = self.slice3(h)
        h_relu3 = h
        h = self.slice4(h)
        h_relu4 = h
        h = self.slice5(h)
        h_relu5 = h
        alexnet_outputs = namedtuple("AlexnetOutputs", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])
        out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)

        return out


class vgg16(torch.nn.Module):
    def __init__(self, requires_grad=False, pretrained=True):
        super(vgg16, self).__init__()
        vgg_pretrained_features = tv.vgg16(pretrained=pretrained).features
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        self.slice5 = torch.nn.Sequential()
        self.N_slices = 5
        for x in range(4):
            self.slice1.add_module(str(x), vgg_pretrained_features[x])
        for x in range(4, 9):
            self.slice2.add_module(str(x), vgg_pretrained_features[x])
        for x in range(9, 16):
            self.slice3.add_module(str(x), vgg_pretrained_features[x])
        for x in range(16, 23):
            self.slice4.add_module(str(x), vgg_pretrained_features[x])
        for x in range(23, 30):
            self.slice5.add_module(str(x), vgg_pretrained_features[x])
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h = self.slice1(X)
        h_relu1_2 = h
        h = self.slice2(h)
        h_relu2_2 = h
        h = self.slice3(h)
        h_relu3_3 = h
        h = self.slice4(h)
        h_relu4_3 = h
        h = self.slice5(h)
        h_relu5_3 = h
        vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
        out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)

        return out


class resnet(torch.nn.Module):
    def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        if (num == 18):
            self.net = tv.resnet18(pretrained=pretrained)
        elif (num == 34):
            self.net = tv.resnet34(pretrained=pretrained)
        elif (num == 50):
            self.net = tv.resnet50(pretrained=pretrained)
        elif (num == 101):
            self.net = tv.resnet101(pretrained=pretrained)
        elif (num == 152):
            self.net = tv.resnet152(pretrained=pretrained)
        self.N_slices = 5

        self.conv1 = self.net.conv1
        self.bn1 = self.net.bn1
        self.relu = self.net.relu
        self.maxpool = self.net.maxpool
        self.layer1 = self.net.layer1
        self.layer2 = self.net.layer2
        self.layer3 = self.net.layer3
        self.layer4 = self.net.layer4

    def forward(self, X):
        h = self.conv1(X)
        h = self.bn1(h)
        h = self.relu(h)
        h_relu1 = h
        h = self.maxpool(h)
        h = self.layer1(h)
        h_conv2 = h
        h = self.layer2(h)
        h_conv3 = h
        h = self.layer3(h)
        h_conv4 = h
        h = self.layer4(h)
        h_conv5 = h

        outputs = namedtuple("Outputs", ['relu1', 'conv2', 'conv3', 'conv4', 'conv5'])
        out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)

        return out
```
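A minimal sketch of using `PNetLin` directly as a perceptual distance, assuming the definitions above (including the `normalize_tensor` helper from earlier in this file) are in scope and torchvision can download the pretrained VGG-16 weights. Note that in the real pipeline the learned `lin0..lin4` weights are loaded from the `lpips_models` checkpoint shown earlier; without that step they keep their random initialization:

```python
import torch

metric = PNetLin(pnet_type='vgg', lpips=True)  # linear weights untrained in this sketch
metric.eval()

# Two batches of 3x64x64 patches scaled to [-1, 1], as the forward()
# docstring above requires.
img0 = torch.rand(4, 3, 64, 64) * 2 - 1
img1 = torch.rand(4, 3, 64, 64) * 2 - 1

with torch.no_grad():
    dist = metric(img0, img1)                            # shape (4, 1, 1, 1)
    dist, per_layer = metric(img0, img1, retPerLayer=True)

print(dist.flatten())                                    # one distance per pair
```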
spaces/Alpaca233/SadTalker/src/facerender/sync_batchnorm/unittest.py
DELETED
@@ -1,29 +0,0 @@

```python
# -*- coding: utf-8 -*-
# File   : unittest.py
# Author : Jiayuan Mao
# Email  : [email protected]
# Date   : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.

import unittest

import numpy as np
from torch.autograd import Variable


def as_numpy(v):
    if isinstance(v, Variable):
        v = v.data
    return v.cpu().numpy()


class TorchTestCase(unittest.TestCase):
    def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
        npa, npb = as_numpy(a), as_numpy(b)
        self.assertTrue(
            np.allclose(npa, npb, atol=atol),
            'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
                a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
        )
```
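A minimal sketch of how `assertTensorClose` would be used, assuming `TorchTestCase` is importable from this module; the test class and tensors are illustrative:

```python
import unittest
import torch
# assumption: TorchTestCase defined above is importable from this module

class ExampleTest(TorchTestCase):
    def test_clone_is_close(self):
        x = torch.randn(3, 3)
        self.assertTensorClose(x, x.clone())            # passes: max abs diff is 0
        self.assertTensorClose(x, x + 1e-5, atol=1e-3)  # still within tolerance

if __name__ == '__main__':
    unittest.main()
```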
spaces/Ameaou/academic-chatgpt3.1/main.py
DELETED
@@ -1,190 +0,0 @@

```python
import os; os.environ['no_proxy'] = '*'  # avoid unexpected contamination from proxy networks

def main():
    import gradio as gr
    from request_llm.bridge_all import predict
    from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, DummyWith
    # You are advised to copy your secrets (API keys, proxy URLs) into a config_private.py so they are not accidentally pushed to GitHub
    proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION, CHATBOT_HEIGHT, LAYOUT, API_KEY, AVAIL_LLM_MODELS = \
        get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION', 'CHATBOT_HEIGHT', 'LAYOUT', 'API_KEY', 'AVAIL_LLM_MODELS')

    # If WEB_PORT is -1, pick a random web port
    PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
    if not AUTHENTICATION: AUTHENTICATION = None

    from check_proxy import get_current_version
    initial_prompt = "Serve me as a writing and programming assistant."
    title_html = f"<h1 align=\"center\">ChatGPT 学术优化 {get_current_version()}</h1>"
    description = """代码开源和更新[地址🚀](https://github.com/binary-husky/chatgpt_academic),感谢热情的[开发者们❤️](https://github.com/binary-husky/chatgpt_academic/graphs/contributors)"""

    # Query logging; Python 3.9+ recommended (the newer the better)
    import logging
    os.makedirs("gpt_log", exist_ok=True)
    try: logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO, encoding="utf-8")
    except: logging.basicConfig(filename="gpt_log/chat_secrets.log", level=logging.INFO)
    print("所有问询记录将自动保存在本地目录./gpt_log/chat_secrets.log, 请注意自我隐私保护哦!")

    # Ordinary function modules
    from core_functional import get_core_functions
    functional = get_core_functions()

    # Advanced function plugins
    from crazy_functional import get_crazy_functions
    crazy_fns = get_crazy_functions()

    # Handle the conversion of markdown text formats
    gr.Chatbot.postprocess = format_io

    # Appearance and color adjustments
    from theme import adjust_theme, advanced_css
    set_theme = adjust_theme()

    # Proxy and auto-update
    from check_proxy import check_proxy, auto_update, warm_up_modules
    proxy_info = check_proxy(proxies)

    gr_L1 = lambda: gr.Row().style()
    gr_L2 = lambda scale: gr.Column(scale=scale)
    if LAYOUT == "TOP-DOWN":
        gr_L1 = lambda: DummyWith()
        gr_L2 = lambda scale: gr.Row()
        CHATBOT_HEIGHT /= 2

    cancel_handles = []
    with gr.Blocks(title="ChatGPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
        gr.HTML(title_html)
        gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
        cookies = gr.State({'api_key': API_KEY, 'llm_model': LLM_MODEL})
        with gr_L1():
            with gr_L2(scale=2):
                chatbot = gr.Chatbot()
                chatbot.style(height=CHATBOT_HEIGHT)
                history = gr.State([])
            with gr_L2(scale=1):
                with gr.Accordion("输入区", open=True) as area_input_primary:
                    with gr.Row():
                        txt = gr.Textbox(show_label=False, lines=2, placeholder="输入问题或API密钥,输入多个密钥时,用英文逗号间隔。支持OpenAI密钥和API2D密钥共存。").style(container=False)
                    with gr.Row():
                        submitBtn = gr.Button("提交", variant="primary")
                    with gr.Row():
                        resetBtn = gr.Button("重置", variant="secondary"); resetBtn.style(size="sm")
                        stopBtn = gr.Button("停止", variant="secondary"); stopBtn.style(size="sm")
                        clearBtn = gr.Button("清除", variant="secondary", visible=False); clearBtn.style(size="sm")
                    with gr.Row():
                        status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}")
                with gr.Accordion("基础功能区", open=True) as area_basic_fn:
                    with gr.Row():
                        for k in functional:
                            variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                            functional[k]["Button"] = gr.Button(k, variant=variant)
                with gr.Accordion("函数插件区", open=True) as area_crazy_fn:
                    with gr.Row():
                        gr.Markdown("注意:以下“红颜色”标识的函数插件需从输入区读取路径作为参数.")
                    with gr.Row():
                        for k in crazy_fns:
                            if not crazy_fns[k].get("AsButton", True): continue
                            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
                            crazy_fns[k]["Button"] = gr.Button(k, variant=variant)
                            crazy_fns[k]["Button"].style(size="sm")
                    with gr.Row():
                        with gr.Accordion("更多函数插件", open=True):
                            dropdown_fn_list = [k for k in crazy_fns.keys() if not crazy_fns[k].get("AsButton", True)]
                            with gr.Column(scale=1):
                                dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="").style(container=False)
                            with gr.Column(scale=1):
                                switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary")
                    with gr.Row():
                        with gr.Accordion("点击展开“文件上传区”。上传本地文件可供红色函数插件调用。", open=False) as area_file_up:
                            file_upload = gr.Files(label="任何文件, 但推荐上传压缩文件(zip, tar)", file_count="multiple")
                with gr.Accordion("更换模型 & SysPrompt & 交互界面布局", open=(LAYOUT == "TOP-DOWN")):
                    system_prompt = gr.Textbox(show_label=True, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
                    top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                    temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                    max_length_sl = gr.Slider(minimum=256, maximum=4096, value=512, step=1, interactive=True, label="Local LLM MaxLength",)
                    checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "底部输入区", "输入清除键"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区")
                    md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)

                gr.Markdown(description)
        with gr.Accordion("备选输入区", open=True, visible=False) as area_input_secondary:
            with gr.Row():
                txt2 = gr.Textbox(show_label=False, placeholder="Input question here.", label="输入区2").style(container=False)
            with gr.Row():
                submitBtn2 = gr.Button("提交", variant="primary")
            with gr.Row():
                resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
                stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
                clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
        # Interaction between the visibility checkboxes and the functional areas
        def fn_area_visibility(a):
            ret = {}
            ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
            ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
            ret.update({area_input_primary: gr.update(visible=("底部输入区" not in a))})
            ret.update({area_input_secondary: gr.update(visible=("底部输入区" in a))})
            ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
            ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
            if "底部输入区" in a: ret.update({txt: gr.update(value="")})
            return ret
        checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2])
        # Collect the combinations of control handles that recur below
        input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt]
        output_combo = [cookies, chatbot, history, status]
        predict_args = dict(fn=ArgsGeneralWrapper(predict), inputs=input_combo, outputs=output_combo)
        # Submit and reset buttons
        cancel_handles.append(txt.submit(**predict_args))
        cancel_handles.append(txt2.submit(**predict_args))
        cancel_handles.append(submitBtn.click(**predict_args))
        cancel_handles.append(submitBtn2.click(**predict_args))
        resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
        resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status])
        clearBtn.click(lambda: ("", ""), None, [txt, txt2])
        clearBtn2.click(lambda: ("", ""), None, [txt, txt2])
        # Register callbacks for the basic function area
        for k in functional:
            click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
            cancel_handles.append(click_handle)
        # File upload area: interaction with the chatbot after files are received
        file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes], [chatbot, txt, txt2])
        # Function plugins: fixed button area
        for k in crazy_fns:
            if not crazy_fns[k].get("AsButton", True): continue
            click_handle = crazy_fns[k]["Button"].click(ArgsGeneralWrapper(crazy_fns[k]["Function"]), [*input_combo, gr.State(PORT)], output_combo)
            click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
            cancel_handles.append(click_handle)
        # Function plugins: interaction between the dropdown menu and the switch button
        def on_dropdown_changed(k):
            variant = crazy_fns[k]["Color"] if "Color" in crazy_fns[k] else "secondary"
            return {switchy_bt: gr.update(value=k, variant=variant)}
        dropdown.select(on_dropdown_changed, [dropdown], [switchy_bt])
        # Register the callback of the switch button
        def route(k, *args, **kwargs):
            if k in [r"打开插件列表", r"请先从插件列表中选择"]: return
            yield from ArgsGeneralWrapper(crazy_fns[k]["Function"])(*args, **kwargs)
        click_handle = switchy_bt.click(route, [switchy_bt, *input_combo, gr.State(PORT)], output_combo)
        click_handle.then(on_report_generated, [file_upload, chatbot], [file_upload, chatbot])
        # def expand_file_area(file_upload, area_file_up):
        #     if len(file_upload) > 0: return {area_file_up: gr.update(open=True)}
        # click_handle.then(expand_file_area, [file_upload, area_file_up], [area_file_up])
        cancel_handles.append(click_handle)
        # Register callbacks for the stop buttons
        stopBtn.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)
        stopBtn2.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)

    # gradio's inbrowser trigger is not very stable; fall back to the original browser-opening function
    def auto_opentab_delay():
        import threading, webbrowser, time
        print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
        print(f"\t(亮色主题): http://localhost:{PORT}")
        print(f"\t(暗色主题): http://localhost:{PORT}/?__dark-theme=true")
        def open():
            time.sleep(2)  # then open the browser
            webbrowser.open_new_tab(f"http://localhost:{PORT}/?__dark-theme=true")
        threading.Thread(target=open, name="open-browser", daemon=True).start()
        threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
        threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()

    auto_opentab_delay()
    demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png")

if __name__ == "__main__":
    main()
```
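One pattern in the file above is worth isolating: every streaming event handle is collected into `cancel_handles`, and the stop buttons pass that list to `cancels=` so a single click aborts all running generations. A standalone sketch of the same mechanism, assuming gradio 3.x; the components and the slow generator are illustrative, not taken from the file:

```python
import time
import gradio as gr

def slow_stream(prompt):
    # stands in for a streaming LLM call
    for i in range(30):
        time.sleep(0.5)
        yield f"{prompt} ... step {i}"

with gr.Blocks() as demo:
    txt = gr.Textbox(label="prompt")
    out = gr.Textbox(label="output")
    go = gr.Button("Submit")
    stop = gr.Button("Stop")

    cancel_handles = [go.click(slow_stream, [txt], [out])]
    # fn=None: the click itself does nothing; it only cancels the queued events
    stop.click(fn=None, inputs=None, outputs=None, cancels=cancel_handles)

demo.queue().launch()
```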
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/facial_recognition/helpers.py
DELETED
@@ -1,119 +0,0 @@

```python
from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module

"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""


class Flatten(Module):
    def forward(self, input):
        return input.view(input.size(0), -1)


def l2_norm(input, axis=1):
    norm = torch.norm(input, 2, axis, True)
    output = torch.div(input, norm)
    return output


class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block. """


def get_block(in_channel, depth, num_units, stride=2):
    return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]


def get_blocks(num_layers):
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    return blocks


class SEModule(Module):
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x


class bottleneck_IR(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut


class bottleneck_IR_SE(Module):
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
```
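A minimal sketch of how an ArcFace-style encoder consumes these helpers, assuming the definitions above are in scope; the stem layer and the 112x112 input size are illustrative of the common InsightFace setup, not taken from this file:

```python
import torch
from torch.nn import Sequential, Conv2d, BatchNorm2d, PReLU

blocks = get_blocks(50)                      # four stages of Bottleneck tuples
input_layer = Sequential(                    # hypothetical stem, stride 1
    Conv2d(3, 64, (3, 3), 1, 1, bias=False),
    BatchNorm2d(64),
    PReLU(64),
)
body = Sequential(*[
    bottleneck_IR_SE(b.in_channel, b.depth, b.stride)
    for stage in blocks for b in stage
])

x = torch.randn(1, 3, 112, 112)
feat = body(input_layer(x))
print(feat.shape)  # torch.Size([1, 512, 7, 7]): four stride-2 stages halve 112 down to 7
```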
spaces/AndrewRWilliams/video-whisper/app.py
DELETED
@@ -1,82 +0,0 @@

```python
# https://huggingface.co/spaces/aadnk/whisper-webui/blob/main/app.py

import gradio as gr
import os
import re
import unicodedata
import pathlib
import asyncio
import ffmpeg

import whisper
from whisper.utils import write_srt

MAX_FILE_PREFIX_LENGTH = 17

model = whisper.load_model("base")

demo = gr.Blocks(cache_examples=False)

def slugify(value, allow_unicode=False):
    """
    Taken from https://github.com/django/django/blob/master/django/utils/text.py
    Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
    dashes to single dashes. Remove characters that aren't alphanumerics,
    underscores, or hyphens. Convert to lowercase. Also strip leading and
    trailing whitespace, dashes, and underscores.
    """
    value = str(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
    else:
        value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value.lower())
    return re.sub(r'[-\s]+', '-', value).strip('-_')

async def transcribe(file):
    print(type(file))
    audio = whisper.load_audio(file)
    # transcribe_options = dict(beam_size=5, best_of=5, without_timestamps=False)
    # result = model.transcribe(file, **transcribe_options)
    result = model.transcribe(audio)

    file_path = pathlib.Path(file)
    sourceName = file_path.stem[:MAX_FILE_PREFIX_LENGTH] + file_path.suffix
    filePrefix = slugify(sourceName, allow_unicode=True)

    # write transcript to file
    with open(filePrefix + "-transcript.txt", 'w', encoding="utf-8") as f:
        f.write(result['text'])

    # subtitles
    with open(filePrefix + "-subs.srt", 'w', encoding="utf-8") as srt:
        write_srt(result["segments"], file=srt)

    download = []
    download.append(filePrefix + "-subs.srt")
    download.append(filePrefix + "-transcript.txt")

    return download

async def transcribe_video(video):
    print(type(video))
    # Left unimplemented in the original file; one plausible completion (an
    # assumption): whisper.load_audio extracts the audio track from a video
    # via ffmpeg, so the audio path can be reused as-is.
    return await transcribe(video)

with demo:
    gr.Markdown("Choisir le type d'entrée: fichier audio ou fichier vidéo")
    with gr.Tab("audio"):
        audio_file = gr.Audio(type="filepath")
        audio_button = gr.Button("Transcrire audio")
    with gr.Tab("vidéo"):
        video_file = gr.Video(type="filepath")
        video_button = gr.Button("Transcrire vidéo")

    transcript = gr.File(label="transcript")

    audio_button.click(transcribe, inputs=audio_file, outputs=transcript)
    video_button.click(transcribe_video, inputs=video_file, outputs=transcript)

demo.launch()
```
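A minimal standalone sketch of the same transcription flow outside Gradio, assuming the whisper version used above (one that still ships `whisper.utils.write_srt`) and a hypothetical local file `example.wav`:

```python
import whisper
from whisper.utils import write_srt

model = whisper.load_model("base")
result = model.transcribe(whisper.load_audio("example.wav"))  # "example.wav" is assumed to exist

print(result["text"])                                  # full transcript
with open("example-subs.srt", "w", encoding="utf-8") as srt:
    write_srt(result["segments"], file=srt)            # timed subtitles
```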
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/README.md
DELETED
@@ -1,228 +0,0 @@
|
|
1 |
-
<p align="center">
|
2 |
-
<br>
|
3 |
-
<img src="https://github.com/huggingface/diffusers/blob/main/docs/source/en/imgs/diffusers_library.jpg" width="400"/>
|
4 |
-
<br>
|
5 |
-
<p>
|
6 |
-
<p align="center">
|
7 |
-
<a href="https://github.com/huggingface/diffusers/blob/main/LICENSE">
|
8 |
-
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue">
|
9 |
-
</a>
|
10 |
-
<a href="https://github.com/huggingface/diffusers/releases">
|
11 |
-
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/diffusers.svg">
|
12 |
-
</a>
|
13 |
-
<a href="CODE_OF_CONDUCT.md">
|
14 |
-
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg">
|
15 |
-
</a>
|
16 |
-
</p>
|
17 |
-
|
18 |
-
🤗 Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or training your own diffusion models, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](https://huggingface.co/docs/diffusers/conceptual/philosophy#usability-over-performance), [simple over easy](https://huggingface.co/docs/diffusers/conceptual/philosophy#simple-over-easy), and [customizability over abstractions](https://huggingface.co/docs/diffusers/conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).
|
19 |
-
|
20 |
-
🤗 Diffusers offers three core components:
|
21 |
-
|
22 |
-
- State-of-the-art [diffusion pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview) that can be run in inference with just a few lines of code.
|
23 |
-
- Interchangeable noise [schedulers](https://huggingface.co/docs/diffusers/api/schedulers/overview) for different diffusion speeds and output quality.
|
24 |
-
- Pretrained [models](https://huggingface.co/docs/diffusers/api/models) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems.
|
25 |
-
|
26 |
-
## Installation
|
27 |
-
|
28 |
-
We recommend installing 🤗 Diffusers in a virtual environment from PyPi or Conda. For more details about installing [PyTorch](https://pytorch.org/get-started/locally/) and [Flax](https://flax.readthedocs.io/en/latest/#installation), please refer to their official documentation.
|
29 |
-
|
30 |
-
### PyTorch
|
31 |
-
|
32 |
-
With `pip` (official package):
|
33 |
-
|
34 |
-
```bash
|
35 |
-
pip install --upgrade diffusers[torch]
|
36 |
-
```
|
37 |
-
|
38 |
-
With `conda` (maintained by the community):
|
39 |
-
|
40 |
-
```sh
|
41 |
-
conda install -c conda-forge diffusers
|
42 |
-
```
|
43 |
-
|
44 |
-
### Flax
|
45 |
-
|
46 |
-
With `pip` (official package):
|
47 |
-
|
48 |
-
```bash
|
49 |
-
pip install --upgrade diffusers[flax]
|
50 |
-
```
|
51 |
-
|
52 |
-
### Apple Silicon (M1/M2) support
|
53 |
-
|
54 |
-
Please refer to the [How to use Stable Diffusion in Apple Silicon](https://huggingface.co/docs/diffusers/optimization/mps) guide.
|
55 |
-
|
56 |
-
## Quickstart
|
57 |
-
|
58 |
-
Generating outputs is super easy with 🤗 Diffusers. To generate an image from text, use the `from_pretrained` method to load any pretrained diffusion model (browse the [Hub](https://huggingface.co/models?library=diffusers&sort=downloads) for 4000+ checkpoints):
|
59 |
-
|
60 |
-
```python
|
61 |
-
from diffusers import DiffusionPipeline
|
62 |
-
import torch
|
63 |
-
|
64 |
-
pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
|
65 |
-
pipeline.to("cuda")
|
66 |
-
pipeline("An image of a squirrel in Picasso style").images[0]
|
67 |
-
```
|
68 |
-
|
69 |
-
You can also dig into the models and schedulers toolbox to build your own diffusion system:
|
70 |
-
|
71 |
-
```python
|
72 |
-
from diffusers import DDPMScheduler, UNet2DModel
|
73 |
-
from PIL import Image
|
74 |
-
import torch
|
75 |
-
import numpy as np
|
76 |
-
|
77 |
-
scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
|
78 |
-
model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda")
|
79 |
-
scheduler.set_timesteps(50)
|
80 |
-
|
81 |
-
sample_size = model.config.sample_size
|
82 |
-
noise = torch.randn((1, 3, sample_size, sample_size)).to("cuda")
|
83 |
-
input = noise
|
84 |
-
|
85 |
-
for t in scheduler.timesteps:
|
86 |
-
with torch.no_grad():
|
87 |
-
noisy_residual = model(input, t).sample
|
88 |
-
prev_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample
|
89 |
-
input = prev_noisy_sample
|
90 |
-
|
91 |
-
image = (input / 2 + 0.5).clamp(0, 1)
|
92 |
-
image = image.cpu().permute(0, 2, 3, 1).numpy()[0]
|
93 |
-
image = Image.fromarray((image * 255).round().astype("uint8"))
|
94 |
-
image
|
95 |
-
```
|
96 |
-
|
97 |
-
Check out the [Quickstart](https://huggingface.co/docs/diffusers/quicktour) to launch your diffusion journey today!
|
98 |
-
|
99 |
-
## How to navigate the documentation
|
100 |
-
|
101 |
-
| **Documentation** | **What can I learn?** |
|
102 |
-
|---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
103 |
-
| [Tutorial](https://huggingface.co/docs/diffusers/tutorials/tutorial_overview) | A basic crash course for learning how to use the library's most important features like using models and schedulers to build your own diffusion system, and training your own diffusion model. |
|
104 |
-
| [Loading](https://huggingface.co/docs/diffusers/using-diffusers/loading_overview) | Guides for how to load and configure all the components (pipelines, models, and schedulers) of the library, as well as how to use different schedulers. |
|
105 |
-
| [Pipelines for inference](https://huggingface.co/docs/diffusers/using-diffusers/pipeline_overview) | Guides for how to use pipelines for different inference tasks, batched generation, controlling generated outputs and randomness, and how to contribute a pipeline to the library. |
|
106 |
-
| [Optimization](https://huggingface.co/docs/diffusers/optimization/opt_overview) | Guides for how to optimize your diffusion model to run faster and consume less memory. |
|
107 |
-
| [Training](https://huggingface.co/docs/diffusers/training/overview) | Guides for how to train a diffusion model for different tasks with different training techniques. |
|
108 |
-
## Contribution
|
109 |
-
|
110 |
-
We ❤️ contributions from the open-source community!
|
111 |
-
If you want to contribute to this library, please check out our [Contribution guide](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md).
|
112 |
-
You can look out for [issues](https://github.com/huggingface/diffusers/issues) you'd like to tackle to contribute to the library.
|
113 |
-
- See [Good first issues](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) for general opportunities to contribute
|
114 |
-
- See [New model/pipeline](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+pipeline%2Fmodel%22) to contribute exciting new diffusion models / diffusion pipelines
|
115 |
-
- See [New scheduler](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22New+scheduler%22)
|
116 |
-
|
117 |
-
Also, say 👋 in our public Discord channel <a href="https://discord.gg/G7tWnz98XR"><img alt="Join us on Discord" src="https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white"></a>. We discuss the hottest trends about diffusion models, help each other with contributions, personal projects or
|
118 |
-
just hang out ☕.
|
119 |
-
|
120 |
-
|
121 |
-
## Popular Tasks & Pipelines

<table>
  <tr>
    <th>Task</th>
    <th>Pipeline</th>
    <th>🤗 Hub</th>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Unconditional Image Generation</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/ddpm"> DDPM </a></td>
    <td><a href="https://huggingface.co/google/ddpm-ema-church-256"> google/ddpm-ema-church-256 </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img">Stable Diffusion Text-to-Image</a></td>
    <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
  </tr>
  <tr>
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/unclip">unCLIP</a></td>
    <td><a href="https://huggingface.co/kakaobrain/karlo-v1-alpha"> kakaobrain/karlo-v1-alpha </a></td>
  </tr>
  <tr>
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/if">DeepFloyd IF</a></td>
    <td><a href="https://huggingface.co/DeepFloyd/IF-I-XL-v1.0"> DeepFloyd/IF-I-XL-v1.0 </a></td>
  </tr>
  <tr>
    <td>Text-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/kandinsky">Kandinsky</a></td>
    <td><a href="https://huggingface.co/kandinsky-community/kandinsky-2-2-decoder"> kandinsky-community/kandinsky-2-2-decoder </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Text-guided Image-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/controlnet">ControlNet</a></td>
    <td><a href="https://huggingface.co/lllyasviel/sd-controlnet-canny"> lllyasviel/sd-controlnet-canny </a></td>
  </tr>
  <tr>
    <td>Text-guided Image-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/pix2pix">Instruct Pix2Pix</a></td>
    <td><a href="https://huggingface.co/timbrooks/instruct-pix2pix"> timbrooks/instruct-pix2pix </a></td>
  </tr>
  <tr>
    <td>Text-guided Image-to-Image</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/img2img">Stable Diffusion Image-to-Image</a></td>
    <td><a href="https://huggingface.co/runwayml/stable-diffusion-v1-5"> runwayml/stable-diffusion-v1-5 </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Text-guided Image Inpainting</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/inpaint">Stable Diffusion Inpaint</a></td>
    <td><a href="https://huggingface.co/runwayml/stable-diffusion-inpainting"> runwayml/stable-diffusion-inpainting </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Image Variation</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/image_variation">Stable Diffusion Image Variation</a></td>
    <td><a href="https://huggingface.co/lambdalabs/sd-image-variations-diffusers"> lambdalabs/sd-image-variations-diffusers </a></td>
  </tr>
  <tr style="border-top: 2px solid black">
    <td>Super Resolution</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/upscale">Stable Diffusion Upscale</a></td>
    <td><a href="https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler"> stabilityai/stable-diffusion-x4-upscaler </a></td>
  </tr>
  <tr>
    <td>Super Resolution</td>
    <td><a href="https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/latent_upscale">Stable Diffusion Latent Upscale</a></td>
    <td><a href="https://huggingface.co/stabilityai/sd-x2-latent-upscaler"> stabilityai/sd-x2-latent-upscaler </a></td>
  </tr>
</table>
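
Each row pairs a task with the pipeline class that implements it and a checkpoint on the 🤗 Hub. As a minimal loading sketch for the text-to-image row above (the prompt and device placement are illustrative, not part of the table):

```python
import torch
from diffusers import DiffusionPipeline

# Load the Stable Diffusion text-to-image checkpoint listed in the table.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe = pipe.to("cuda")  # assumes a CUDA GPU is available

image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")
```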

## Popular libraries using 🧨 Diffusers

- https://github.com/microsoft/TaskMatrix
- https://github.com/invoke-ai/InvokeAI
- https://github.com/apple/ml-stable-diffusion
- https://github.com/Sanster/lama-cleaner
- https://github.com/IDEA-Research/Grounded-Segment-Anything
- https://github.com/ashawkey/stable-dreamfusion
- https://github.com/deep-floyd/IF
- https://github.com/bentoml/BentoML
- https://github.com/bmaltais/kohya_ss
- +3000 other amazing GitHub repositories 💪

Thank you for using us ❤️

## Credits

This library concretizes previous work by many different authors and would not have been possible without their great research and implementations. We'd like to thank, in particular, the following implementations which have helped us in our development and without which the API would not be as polished as it is today:

- @CompVis' latent diffusion models library, available [here](https://github.com/CompVis/latent-diffusion)
- @hojonathanho's original DDPM implementation, available [here](https://github.com/hojonathanho/diffusion), as well as the extremely useful translation into PyTorch by @pesser, available [here](https://github.com/pesser/pytorch_diffusion)
- @ermongroup's DDIM implementation, available [here](https://github.com/ermongroup/ddim)
- @yang-song's Score-VE and Score-VP implementations, available [here](https://github.com/yang-song/score_sde_pytorch)

We also want to thank @heejkoo for the very helpful overview of papers, code and resources on diffusion models, available [here](https://github.com/heejkoo/Awesome-Diffusion-Models), as well as @crowsonkb and @rromb for useful discussions and insights.

## Citation

```bibtex
@misc{von-platen-etal-2022-diffusers,
  author = {Patrick von Platen and Suraj Patil and Anton Lozhkov and Pedro Cuenca and Nathan Lambert and Kashif Rasul and Mishig Davaadorj and Thomas Wolf},
  title = {Diffusers: State-of-the-art diffusion models},
  year = {2022},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/huggingface/diffusers}}
}
```

spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/nasfcos_head.py
DELETED
@@ -1,75 +0,0 @@
import copy

import torch.nn as nn
from mmcv.cnn import (ConvModule, Scale, bias_init_with_prob,
                      caffe2_xavier_init, normal_init)

from mmdet.models.dense_heads.fcos_head import FCOSHead
from ..builder import HEADS


@HEADS.register_module()
class NASFCOSHead(FCOSHead):
    """Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.

    It is quite similar with FCOS head, except for the searched structure of
    classification branch and bbox regression branch, where a structure of
    "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
    """

    def _init_layers(self):
        """Initialize layers of the head."""
        dconv3x3_config = dict(
            type='DCNv2',
            kernel_size=3,
            use_bias=True,
            deform_groups=2,
            padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)

        self.arch_config = [
            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
        ]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i, op_ in enumerate(self.arch_config):
            op = copy.deepcopy(op_)
            chn = self.in_channels if i == 0 else self.feat_channels
            assert isinstance(op, dict)
            use_bias = op.pop('use_bias', False)
            padding = op.pop('padding', 0)
            kernel_size = op.pop('kernel_size')
            module = ConvModule(
                chn,
                self.feat_channels,
                kernel_size,
                stride=1,
                padding=padding,
                norm_cfg=self.norm_cfg,
                bias=use_bias,
                conv_cfg=op)

            self.cls_convs.append(copy.deepcopy(module))
            self.reg_convs.append(copy.deepcopy(module))

        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)

        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

    def init_weights(self):
        """Initialize weights of the head."""
        # retinanet_bias_init
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_reg, std=0.01)
        normal_init(self.conv_centerness, std=0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)

        for branch in [self.cls_convs, self.reg_convs]:
            for module in branch.modules():
                if isinstance(module, ConvModule) \
                        and isinstance(module.conv, nn.Conv2d):
                    caffe2_xavier_init(module.conv)
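
This head is picked up through mmdet's registry, so a training config refers to it by name rather than importing the class. A hypothetical config fragment (every numeric value below is an illustrative assumption, not taken from this repository):

```python
# Hypothetical mmdet config fragment: the registry resolves
# type='NASFCOSHead' to the NASFCOSHead class registered above.
bbox_head = dict(
    type='NASFCOSHead',
    num_classes=80,        # illustrative: COCO class count
    in_channels=256,       # illustrative: FPN output channels
    feat_channels=256,
    strides=[8, 16, 32, 64, 128])
```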
spaces/Andy1621/uniformer_image_detection/mmdet/utils/contextmanagers.py
DELETED
@@ -1,121 +0,0 @@
import asyncio
import contextlib
import logging
import os
import time
from typing import List

import torch

logger = logging.getLogger(__name__)

DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))


@contextlib.asynccontextmanager
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams."""
    if not torch.cuda.is_available():
        yield
        return

    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        streams = [s if s else stream_before_context_switch for s in streams]

    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]

    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)

        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)

        grad_enabled_after = torch.is_grad_enabled()

        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'

        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        with torch.cuda.stream(stream_before_context_switch):
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )

        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch

        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)


@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
                     trace_name='concurrent',
                     name='stream'):
    """Run code concurrently in different streams.

    :param streamqueue: asyncio.Queue instance.

    Queue tasks define the pool of streams used for concurrent execution.
    """
    if not torch.cuda.is_available():
        yield
        return

    initial_stream = torch.cuda.current_stream()

    with torch.cuda.stream(initial_stream):
        stream = await streamqueue.get()
        assert isinstance(stream, torch.cuda.Stream)

        try:
            with torch.cuda.stream(stream):
                logger.debug('%s %s is starting, stream: %s', trace_name,
                             name, stream)
                yield
                current = torch.cuda.current_stream()
                assert current == stream
                logger.debug('%s %s has finished, stream: %s', trace_name,
                             name, stream)
        finally:
            streamqueue.task_done()
            streamqueue.put_nowait(stream)
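
As a rough sketch of how these managers are meant to compose (assuming a CUDA machine; the pool size and the dummy workload are made up for the example):

```python
import asyncio

import torch

# `concurrent` is the async context manager defined above.

async def worker(streamqueue: asyncio.Queue):
    # Borrow a stream from the pool, run kernels on it, then return it.
    async with concurrent(streamqueue):
        x = torch.ones(1024, device='cuda')
        y = x * 2  # queued on the borrowed stream

async def main():
    streamqueue = asyncio.Queue()
    for _ in range(2):  # pool of two streams (illustrative size)
        streamqueue.put_nowait(torch.cuda.Stream())
    await asyncio.gather(worker(streamqueue), worker(streamqueue))

asyncio.run(main())
```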
spaces/Andy1621/uniformer_image_segmentation/configs/point_rend/pointrend_r50_512x1024_80k_cityscapes.py
DELETED
@@ -1,5 +0,0 @@
_base_ = [
    '../_base_/models/pointrend_r50.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
lr_config = dict(warmup='linear', warmup_iters=200)
spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/clipboard/clipboard.min.js
DELETED
@@ -1,7 +0,0 @@
/*!
 * clipboard.js v2.0.11
 * https://clipboardjs.com/
 *
 * Licensed MIT © Zeno Rocha
 */
!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return b}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),r=n.n(e);function c(t){try{return document.execCommand(t)}catch(t){return}}var a=function(t){t=r()(t);return c("cut"),t};function o(t,e){var n,o,t=(n=t,o="rtl"===document.documentElement.getAttribute("dir"),(t=document.createElement("textarea")).style.fontSize="12pt",t.style.border="0",t.style.padding="0",t.style.margin="0",t.style.position="absolute",t.style[o?"right":"left"]="-9999px",o=window.pageYOffset||document.documentElement.scrollTop,t.style.top="".concat(o,"px"),t.setAttribute("readonly",""),t.value=n,t);return e.container.appendChild(t),e=r()(t),c("copy"),t.remove(),e}var f=function(t){var e=1<arguments.length&&void 0!==arguments[1]?arguments[1]:{container:document.body},n="";return"string"==typeof t?n=o(t,e):t instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(null==t?void 0:t.type)?n=o(t.value,e):(n=r()(t),c("copy")),n};function l(t){return(l="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var s=function(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:{},e=t.action,n=void 0===e?"copy":e,o=t.container,e=t.target,t=t.text;if("copy"!==n&&"cut"!==n)throw new Error('Invalid "action" value, use either "copy" or "cut"');if(void 0!==e){if(!e||"object"!==l(e)||1!==e.nodeType)throw new Error('Invalid "target" value, use a valid Element');if("copy"===n&&e.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. Please use "readonly" instead of "disabled" attribute');if("cut"===n&&(e.hasAttribute("readonly")||e.hasAttribute("disabled")))throw new Error('Invalid "target" attribute. 
You can\'t cut text from elements with "readonly" or "disabled" attributes')}return t?f(t,{container:o}):e?"cut"===n?a(e):f(e,{container:o}):void 0};function p(t){return(p="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function d(t,e){for(var n=0;n<e.length;n++){var o=e[n];o.enumerable=o.enumerable||!1,o.configurable=!0,"value"in o&&(o.writable=!0),Object.defineProperty(t,o.key,o)}}function y(t,e){return(y=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function h(n){var o=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(t){return!1}}();return function(){var t,e=v(n);return t=o?(t=v(this).constructor,Reflect.construct(e,arguments,t)):e.apply(this,arguments),e=this,!(t=t)||"object"!==p(t)&&"function"!=typeof t?function(t){if(void 0!==t)return t;throw new ReferenceError("this hasn't been initialised - super() hasn't been called")}(e):t}}function v(t){return(v=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}function m(t,e){t="data-clipboard-".concat(t);if(e.hasAttribute(t))return e.getAttribute(t)}var b=function(){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&y(t,e)}(r,i());var t,e,n,o=h(r);function r(t,e){var n;return function(t){if(!(t instanceof r))throw new TypeError("Cannot call a class as a function")}(this),(n=o.call(this)).resolveOptions(e),n.listenClick(t),n}return t=r,n=[{key:"copy",value:function(t){var e=1<arguments.length&&void 0!==arguments[1]?arguments[1]:{container:document.body};return f(t,e)}},{key:"cut",value:function(t){return a(t)}},{key:"isSupported",value:function(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:["copy","cut"],t="string"==typeof t?[t]:t,e=!!document.queryCommandSupported;return t.forEach(function(t){e=e&&!!document.queryCommandSupported(t)}),e}}],(e=[{key:"resolveOptions",value:function(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:{};this.action="function"==typeof t.action?t.action:this.defaultAction,this.target="function"==typeof t.target?t.target:this.defaultTarget,this.text="function"==typeof t.text?t.text:this.defaultText,this.container="object"===p(t.container)?t.container:document.body}},{key:"listenClick",value:function(t){var e=this;this.listener=u()(t,"click",function(t){return e.onClick(t)})}},{key:"onClick",value:function(t){var e=t.delegateTarget||t.currentTarget,n=this.action(e)||"copy",t=s({action:n,container:this.container,target:this.target(e),text:this.text(e)});this.emit(t?"success":"error",{action:n,text:t,trigger:e,clearSelection:function(){e&&e.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(t){return m("action",t)}},{key:"defaultTarget",value:function(t){t=m("target",t);if(t)return document.querySelector(t)}},{key:"defaultText",value:function(t){return m("text",t)}},{key:"destroy",value:function(){this.listener.destroy()}}])&&d(t.prototype,e),n&&d(t,n),r}()},828:function(t){var e;"undefined"==typeof 
Element||Element.prototype.matches||((e=Element.prototype).matches=e.matchesSelector||e.mozMatchesSelector||e.msMatchesSelector||e.oMatchesSelector||e.webkitMatchesSelector),t.exports=function(t,e){for(;t&&9!==t.nodeType;){if("function"==typeof t.matches&&t.matches(e))return t;t=t.parentNode}}},438:function(t,e,n){var u=n(828);function i(t,e,n,o,r){var i=function(e,n,t,o){return function(t){t.delegateTarget=u(t.target,n),t.delegateTarget&&o.call(e,t)}}.apply(this,arguments);return t.addEventListener(n,i,r),{destroy:function(){t.removeEventListener(n,i,r)}}}t.exports=function(t,e,n,o,r){return"function"==typeof t.addEventListener?i.apply(null,arguments):"function"==typeof n?i.bind(null,document).apply(null,arguments):("string"==typeof t&&(t=document.querySelectorAll(t)),Array.prototype.map.call(t,function(t){return i(t,e,n,o,r)}))}},879:function(t,n){n.node=function(t){return void 0!==t&&t instanceof HTMLElement&&1===t.nodeType},n.nodeList=function(t){var e=Object.prototype.toString.call(t);return void 0!==t&&("[object NodeList]"===e||"[object HTMLCollection]"===e)&&"length"in t&&(0===t.length||n.node(t[0]))},n.string=function(t){return"string"==typeof t||t instanceof String},n.fn=function(t){return"[object Function]"===Object.prototype.toString.call(t)}},370:function(t,e,n){var f=n(879),l=n(438);t.exports=function(t,e,n){if(!t&&!e&&!n)throw new Error("Missing required arguments");if(!f.string(e))throw new TypeError("Second argument must be a String");if(!f.fn(n))throw new TypeError("Third argument must be a Function");if(f.node(t))return c=e,a=n,(u=t).addEventListener(c,a),{destroy:function(){u.removeEventListener(c,a)}};if(f.nodeList(t))return o=t,r=e,i=n,Array.prototype.forEach.call(o,function(t){t.addEventListener(r,i)}),{destroy:function(){Array.prototype.forEach.call(o,function(t){t.removeEventListener(r,i)})}};if(f.string(t))return t=t,e=e,n=n,l(document.body,t,e,n);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList");var o,r,i,u,c,a}},817:function(t){t.exports=function(t){var e,n="SELECT"===t.nodeName?(t.focus(),t.value):"INPUT"===t.nodeName||"TEXTAREA"===t.nodeName?((e=t.hasAttribute("readonly"))||t.setAttribute("readonly",""),t.select(),t.setSelectionRange(0,t.value.length),e||t.removeAttribute("readonly"),t.value):(t.hasAttribute("contenteditable")&&t.focus(),n=window.getSelection(),(e=document.createRange()).selectNodeContents(t),n.removeAllRanges(),n.addRange(e),n.toString());return n}},279:function(t){function e(){}e.prototype={on:function(t,e,n){var o=this.e||(this.e={});return(o[t]||(o[t]=[])).push({fn:e,ctx:n}),this},once:function(t,e,n){var o=this;function r(){o.off(t,r),e.apply(n,arguments)}return r._=e,this.on(t,r,n)},emit:function(t){for(var e=[].slice.call(arguments,1),n=((this.e||(this.e={}))[t]||[]).slice(),o=0,r=n.length;o<r;o++)n[o].fn.apply(n[o].ctx,e);return this},off:function(t,e){var n=this.e||(this.e={}),o=n[t],r=[];if(o&&e)for(var i=0,u=o.length;i<u;i++)o[i].fn!==e&&o[i].fn._!==e&&r.push(o[i]);return r.length?n[t]=r:delete n[t],this}},t.exports=e,t.exports.TinyEmitter=e}},r={},o.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return o.d(e,{a:e}),e},o.d=function(t,e){for(var n in e)o.o(e,n)&&!o.o(t,n)&&Object.defineProperty(t,n,{enumerable:!0,get:e[n]})},o.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},o(686).default;function o(t){if(r[t])return r[t].exports;var e=r[t]={exports:{}};return n[t](e,e.exports,o),e.exports}var n,r});
spaces/Ankita0512ghosh/Weather_bot/app.py
DELETED
@@ -1,83 +0,0 @@
import requests
import streamlit as st
import json

def get_weather(city):
    """Gets the current weather forecast for the given city."""

    # Get the API key from OpenWeatherMap.
    API_KEY = "58bb081f22fea521a4a3cd7ccb24aa88"

    # Make a request to the OpenWeatherMap API.
    response = requests.get(
        "https://api.openweathermap.org/data/2.5/weather?q={}&appid={}".format(city, API_KEY)
    )

    # Check for errors.
    if response.status_code != 200:
        raise Exception("Error getting weather data: {}".format(response.status_code))

    # Parse the JSON response.
    weather_data = json.loads(response.content.decode("utf-8"))

    # Return the current weather forecast.
    return weather_data["weather"][0]["description"], weather_data["main"]["temp"], weather_data["main"]["pressure"], weather_data["main"]["humidity"]

# main function
if __name__ == "__main__":

    # Create a title for the app.
    st.title("Weather Forecast")

    # Get the city name from the user.
    city = st.text_input("Enter a city name: ")

    # Show the weather forecast for the city.
    if city:
        weather_description, temperature, pressure, humidity = get_weather(city)

        # Add a background image.
        st.markdown(f"""<style>.stApp {{
            background-image: url("https://images.unsplash.com/photo-1474540412665-1cdae210ae6b?ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxzZWFyY2h8Mnx8Y2FsbXxlbnwwfHwwfHx8MA%3D%3D&w=1000&q=80");
            background-attachment: fixed;
            background-size: cover
        }}</style>""", unsafe_allow_html=True)

        # Add a heading.
        st.header("Weather in **{}** ".format(city))

        # Add a paragraph.
        st.markdown("The weather in **{}** is **{}** and the temperature is **{}** Kelvin Unit.".format(city, weather_description, temperature))

        col1, col2, col3 = st.columns(3)

        # Add a button to convert the temperature to Celsius.
        with col1:
            convert_to_celsius = st.button("Convert to Celsius")

        if convert_to_celsius:
            temperature_in_celsius = float("{:.2f}".format(temperature - 273.15))
            st.markdown(
                f"""
                The temperature in **{city}** is **{weather_description}** and the temperature is **{temperature_in_celsius}** degrees Celsius.
                """
            )

        # Add button to convert the temperature to Fahrenheit
        with col2:
            convert_to_fahrenheit = st.button("Convert to Fahrenheit")

        if convert_to_fahrenheit:
            temperature_in_fahrenheit = float("{:.2f}".format((temperature - 273.15) * 9 / 5 + 32))
            st.markdown(
                f"""
                The temperature in **{city}** is **{weather_description}** and the temperature is **{temperature_in_fahrenheit}** degrees Fahrenheit.
                """
            )

        # Add pressure and humidity
        with col3:
            p_and_h = st.button("Pressure and Humidity")

        if p_and_h:
            st.markdown("The pressure is **{}** hPa and the humidity is **{}**%.".format(pressure, humidity))
spaces/Aphrodite/stable-diffusion-2/app.py
DELETED
@@ -1,154 +0,0 @@
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
import gradio as gr
import torch
from PIL import Image

model_id = 'stabilityai/stable-diffusion-2'
prefix = ''

scheduler = DPMSolverMultistepScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    trained_betas=None,
    predict_epsilon=True,
    thresholding=True,
    algorithm_type="dpmsolver++",
    solver_type="midpoint",
    lower_order_final=True,
)

pipe = StableDiffusionPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    scheduler=scheduler)

pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
    model_id,
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    scheduler=scheduler)

if torch.cuda.is_available():
    pipe = pipe.to("cuda")
    pipe_i2i = pipe_i2i.to("cuda")

def error_str(error, title="Error"):
    return f"""#### {title}
            {error}""" if error else ""

def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=True):

    generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
    prompt = f"{prefix} {prompt}" if auto_prefix else prompt

    try:
        if img is not None:
            return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
        else:
            return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
    except Exception as e:
        return None, error_str(e)

def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):

    result = pipe(
        prompt,
        negative_prompt = neg_prompt,
        num_inference_steps = int(steps),
        guidance_scale = guidance,
        width = width,
        height = height,
        generator = generator)

    return replace_nsfw_images(result)

def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):

    ratio = min(height / img.height, width / img.width)
    img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
    result = pipe_i2i(
        prompt,
        negative_prompt = neg_prompt,
        init_image = img,
        num_inference_steps = int(steps),
        strength = strength,
        guidance_scale = guidance,
        width = width,
        height = height,
        generator = generator)

    return replace_nsfw_images(result)

def replace_nsfw_images(results):

    for i in range(len(results.images)):
        if results.nsfw_content_detected[i]:
            results.images[i] = Image.open("nsfw.png")
    return results.images[0]

css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
"""
with gr.Blocks(css=css) as demo:
    gr.HTML(
        f"""
            <div class="main-div">
              <div>
                <h1>Stable Diffusion 2</h1>
              </div>
              <p>
               Demo for <a href="https://huggingface.co/stabilityai/stable-diffusion-2">Stable Diffusion 2</a> Stable Diffusion model.<br>
               Add the following tokens to your prompts for the model to work properly: <b></b>.
              </p>
              Running on <b>{"GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"}</b>
            </div>
        """
    )
    with gr.Row():

        with gr.Column(scale=55):
            with gr.Group():
                with gr.Row():
                    prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2, placeholder=f"{prefix} [your prompt]").style(container=False)
                    generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))

                image_out = gr.Image(height=512)
            error_output = gr.Markdown()

        with gr.Column(scale=45):
            with gr.Tab("Options"):
                with gr.Group():
                    neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
                    auto_prefix = gr.Checkbox(label="Prefix styling tokens automatically ()", value=True)

                    with gr.Row():
                        guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
                        steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)

                    with gr.Row():
                        width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
                        height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)

                    seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)

            with gr.Tab("Image to image"):
                with gr.Group():
                    image = gr.Image(label="Image", height=256, tool="editor", type="pil")
                    strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)

    auto_prefix.change(lambda x: gr.update(placeholder=f"{prefix} [your prompt]" if x else "[Your prompt]"), inputs=auto_prefix, outputs=prompt, queue=False)

    inputs = [prompt, guidance, steps, width, height, seed, image, strength, neg_prompt, auto_prefix]
    outputs = [image_out, error_output]
    prompt.submit(inference, inputs=inputs, outputs=outputs)
    generate.click(inference, inputs=inputs, outputs=outputs)

    gr.HTML("""
    <div style="border-top: 1px solid #303030;">
      <br>
      <p>This space was created using <a href="https://huggingface.co/spaces/anzorq/sd-space-creator">SD Space Creator</a>.</p>
    </div>
    """)

demo.queue(concurrency_count=1)
demo.launch()
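
Instead of spelling out every beta parameter as this app does, newer diffusers releases can derive the same scheduler from the checkpoint's own config; a small sketch of that alternative (not what this Space shipped):

```python
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2")
# Reuse the scheduler settings stored with the checkpoint.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
```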
spaces/AquaSuisei/ChatGPTXE/modules/utils.py
DELETED
@@ -1,536 +0,0 @@
# -*- coding:utf-8 -*-
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
import logging
import json
import os
import datetime
import hashlib
import csv
import requests
import re
import html
import sys
import subprocess

import gradio as gr
from pypinyin import lazy_pinyin
import tiktoken
import mdtex2html
from markdown import markdown
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import pandas as pd

from modules.presets import *
from . import shared
from modules.config import retrieve_proxy

if TYPE_CHECKING:
    from typing import TypedDict

    class DataframeData(TypedDict):
        headers: List[str]
        data: List[List[str | int | bool]]


def count_token(message):
    encoding = tiktoken.get_encoding("cl100k_base")
    input_str = f"role: {message['role']}, content: {message['content']}"
    length = len(encoding.encode(input_str))
    return length


def markdown_to_html_with_syntax_highlight(md_str):
    def replacer(match):
        lang = match.group(1) or "text"
        code = match.group(2)

        try:
            lexer = get_lexer_by_name(lang, stripall=True)
        except ValueError:
            lexer = get_lexer_by_name("text", stripall=True)

        formatter = HtmlFormatter()
        highlighted_code = highlight(code, lexer, formatter)

        return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'

    code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
    md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)

    html_str = markdown(md_str)
    return html_str


def normalize_markdown(md_text: str) -> str:
    lines = md_text.split("\n")
    normalized_lines = []
    inside_list = False

    for i, line in enumerate(lines):
        if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
            if not inside_list and i > 0 and lines[i - 1].strip() != "":
                normalized_lines.append("")
            inside_list = True
            normalized_lines.append(line)
        elif inside_list and line.strip() == "":
            if i < len(lines) - 1 and not re.match(
                r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
            ):
                normalized_lines.append(line)
            continue
        else:
            inside_list = False
            normalized_lines.append(line)

    return "\n".join(normalized_lines)


def convert_mdtext(md_text):
    code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
    inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
    code_blocks = code_block_pattern.findall(md_text)
    non_code_parts = code_block_pattern.split(md_text)[::2]

    result = []
    for non_code, code in zip(non_code_parts, code_blocks + [""]):
        if non_code.strip():
            non_code = normalize_markdown(non_code)
            if inline_code_pattern.search(non_code):
                result.append(markdown(non_code, extensions=["tables"]))
            else:
                result.append(mdtex2html.convert(non_code, extensions=["tables"]))
        if code.strip():
            # _, code = detect_language(code)  # code highlighting temporarily disabled: it breaks on large code blocks
            # code = code.replace("\n\n", "\n")  # blank-line stripping temporarily disabled: it breaks on large code blocks
            code = f"\n```{code}\n\n```"
            code = markdown_to_html_with_syntax_highlight(code)
            result.append(code)
    result = "".join(result)
    result += ALREADY_CONVERTED_MARK
    return result


def convert_asis(userinput):
    return (
        f'<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>'
        + ALREADY_CONVERTED_MARK
    )


def detect_converted_mark(userinput):
    if userinput.endswith(ALREADY_CONVERTED_MARK):
        return True
    else:
        return False


def detect_language(code):
    if code.startswith("\n"):
        first_line = ""
    else:
        first_line = code.strip().split("\n", 1)[0]
    language = first_line.lower() if first_line else ""
    code_without_language = code[len(first_line) :].lstrip() if first_line else code
    return language, code_without_language


def construct_text(role, text):
    return {"role": role, "content": text}


def construct_user(text):
    return construct_text("user", text)


def construct_system(text):
    return construct_text("system", text)


def construct_assistant(text):
    return construct_text("assistant", text)


def construct_token_message(tokens: List[int]):
    token_sum = 0
    for i in range(len(tokens)):
        token_sum += sum(tokens[: i + 1])
    return f"Token count: {sum(tokens)}; this conversation has consumed {token_sum} tokens in total"


def delete_first_conversation(history, previous_token_count):
    if history:
        del history[:2]
        del previous_token_count[0]
    return (
        history,
        previous_token_count,
        construct_token_message(previous_token_count),
    )


def delete_last_conversation(chatbot, history, previous_token_count):
    if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
        logging.info("Error message detected; only removing the chatbot record")
        chatbot.pop()
        return chatbot, history
    if len(history) > 0:
        logging.info("Deleted one round of conversation history")
        history.pop()
        history.pop()
    if len(chatbot) > 0:
        logging.info("Deleted one round of chatbot messages")
        chatbot.pop()
    if len(previous_token_count) > 0:
        logging.info("Deleted one round of token-count records")
        previous_token_count.pop()
    return (
        chatbot,
        history,
        previous_token_count,
        construct_token_message(previous_token_count),
    )


def save_file(filename, system, history, chatbot, user_name):
    logging.info(f"{user_name}: saving conversation history...")
    os.makedirs(HISTORY_DIR / user_name, exist_ok=True)
    if filename.endswith(".json"):
        json_s = {"system": system, "history": history, "chatbot": chatbot}
        print(json_s)
        with open(os.path.join(HISTORY_DIR / user_name, filename), "w") as f:
            json.dump(json_s, f)
    elif filename.endswith(".md"):
        md_s = f"system: \n- {system} \n"
        for data in history:
            md_s += f"\n{data['role']}: \n- {data['content']} \n"
        with open(os.path.join(HISTORY_DIR / user_name, filename), "w", encoding="utf8") as f:
            f.write(md_s)
    logging.info(f"{user_name}: conversation history saved")
    return os.path.join(HISTORY_DIR / user_name, filename)


def save_chat_history(filename, system, history, chatbot, user_name):
    if filename == "":
        return
    if not filename.endswith(".json"):
        filename += ".json"
    return save_file(filename, system, history, chatbot, user_name)


def export_markdown(filename, system, history, chatbot, user_name):
    if filename == "":
        return
    if not filename.endswith(".md"):
        filename += ".md"
    return save_file(filename, system, history, chatbot, user_name)


def load_chat_history(filename, system, history, chatbot, user_name):
    logging.info(f"{user_name}: loading conversation history...")
    if type(filename) != str:
        filename = filename.name
    try:
        with open(os.path.join(HISTORY_DIR / user_name, filename), "r") as f:
            json_s = json.load(f)
        try:
            if type(json_s["history"][0]) == str:
                logging.info("History is in the legacy format; converting...")
                new_history = []
                for index, item in enumerate(json_s["history"]):
                    if index % 2 == 0:
                        new_history.append(construct_user(item))
                    else:
                        new_history.append(construct_assistant(item))
                json_s["history"] = new_history
                logging.info(new_history)
        except:
            # no conversation history
            pass
        logging.info(f"{user_name}: conversation history loaded")
        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
    except FileNotFoundError:
        logging.info(f"{user_name}: no history file found; doing nothing")
        return filename, system, history, chatbot


def sorted_by_pinyin(list):
    return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])


def get_file_names(dir, plain=False, filetypes=[".json"]):
    logging.info(f"Getting file list; directory: {dir}, file types: {filetypes}, plain list: {plain}")
    files = []
    try:
        for type in filetypes:
            files += [f for f in os.listdir(dir) if f.endswith(type)]
    except FileNotFoundError:
        files = []
    files = sorted_by_pinyin(files)
    if files == []:
        files = [""]
    logging.debug(f"files are:{files}")
    if plain:
        return files
    else:
        return gr.Dropdown.update(choices=files)


def get_history_names(plain=False, user_name=""):
    logging.info(f"Getting history file list for user {user_name}")
    return get_file_names(HISTORY_DIR / user_name, plain)


def load_template(filename, mode=0):
    logging.info(f"Loading template file {filename}, mode {mode} (0: return dict and dropdown, 1: return dropdown, 2: return dict)")
    lines = []
    logging.info("Loading template...")
    if filename.endswith(".json"):
        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
            lines = json.load(f)
        lines = [[i["act"], i["prompt"]] for i in lines]
    else:
        with open(
            os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8"
        ) as csvfile:
            reader = csv.reader(csvfile)
            lines = list(reader)
        lines = lines[1:]
    if mode == 1:
        return sorted_by_pinyin([row[0] for row in lines])
    elif mode == 2:
        return {row[0]: row[1] for row in lines}
    else:
        choices = sorted_by_pinyin([row[0] for row in lines])
        return {row[0]: row[1] for row in lines}, gr.Dropdown.update(
            choices=choices
        )


def get_template_names(plain=False):
    logging.info("Getting template file list")
    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])


def get_template_content(templates, selection, original_system_prompt):
    logging.info(f"Applying template; selection: {selection}, original system prompt: {original_system_prompt}")
    try:
        return templates[selection]
    except:
        return original_system_prompt


def reset_state():
    logging.info("Resetting state")
    return [], [], [], construct_token_message([0])


def reset_textbox():
    logging.debug("Resetting textbox")
    return gr.update(value="")


def reset_default():
    default_host = shared.state.reset_api_host()
    retrieve_proxy("")
    return gr.update(value=default_host), gr.update(value=""), "API host and proxy have been reset"


def change_api_host(host):
    shared.state.set_api_host(host)
    msg = f"API host changed to {host}"
    logging.info(msg)
    return msg


def change_proxy(proxy):
    retrieve_proxy(proxy)
    os.environ["HTTPS_PROXY"] = proxy
    msg = f"Proxy changed to {proxy}"
    logging.info(msg)
    return msg


def hide_middle_chars(s):
    if s is None:
        return ""
    if len(s) <= 8:
        return s
    else:
        head = s[:4]
        tail = s[-4:]
        hidden = "*" * (len(s) - 8)
        return head + hidden + tail


def submit_key(key):
    key = key.strip()
    msg = f"API key changed to {hide_middle_chars(key)}"
    logging.info(msg)
    return key, msg


def replace_today(prompt):
    today = datetime.datetime.today().strftime("%Y-%m-%d")
    return prompt.replace("{current_date}", today)


def get_geoip():
    try:
        with retrieve_proxy():
            response = requests.get("https://ipapi.co/json/", timeout=5)
        data = response.json()
    except:
        data = {"error": True, "reason": "failed to connect to ipapi"}
    if "error" in data.keys():
        logging.warning(f"Could not fetch IP information.\n{data}")
        if data["reason"] == "RateLimited":
            return (
                "Failed to get the IP's geolocation because the lookup rate limit was reached. Chat may still work."
            )
        else:
            return f"Failed to get the IP's geolocation. Reason: {data['reason']}. You can still use the chat."
    else:
        country = data["country_name"]
        if country == "China":
            text = "**Your IP region: China. Check your proxy settings immediately; using the API from an unsupported region may get your account banned.**"
        else:
            text = f"Your IP region: {country}."
        logging.info(text)
        return text


def find_n(lst, max_num):
    n = len(lst)
    total = sum(lst)

    if total < max_num:
        return n

    for i in range(len(lst)):
        if total - lst[i] < max_num:
            return n - i - 1
        total = total - lst[i]
    return 1


def start_outputing():
    logging.debug("Showing the cancel button and hiding the send button")
    return gr.Button.update(visible=True), gr.Button.update(visible=False)


def end_outputing():
    return (
        gr.Button.update(visible=True),
        gr.Button.update(visible=False),
    )


def cancel_outputing():
    logging.info("Interrupting output...")
    shared.state.interrupt()


def transfer_input(inputs):
    # return everything at once to reduce latency
    textbox = reset_textbox()
    outputing = start_outputing()
    return (
        inputs,
        gr.update(value=""),
        gr.Button.update(visible=True),
        gr.Button.update(visible=False),
    )


def run(command, desc=None, errdesc=None, custom_env=None, live=False):
    if desc is not None:
        print(desc)
    if live:
        result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
        if result.returncode != 0:
            raise RuntimeError(f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}""")

        return ""
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
    if result.returncode != 0:
        message = f"""{errdesc or 'Error running command'}.
Command: {command}
Error code: {result.returncode}
stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
"""
        raise RuntimeError(message)
    return result.stdout.decode(encoding="utf8", errors="ignore")

def versions_html():
    git = os.environ.get('GIT', "git")
    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
    try:
        commit_hash = run(f"{git} rev-parse HEAD").strip()
    except Exception:
        commit_hash = "<none>"
    if commit_hash != "<none>":
        short_commit = commit_hash[0:7]
        commit_info = f"<a style=\"text-decoration:none\" href=\"https://github.com/GaiZhenbiao/ChuanhuChatGPT/commit/{short_commit}\">{short_commit}</a>"
    else:
        commit_info = "unknown \U0001F615"
    return f"""
        Python: <span title="{sys.version}">{python_version}</span>
         • 
        Gradio: {gr.__version__}
         • 
        Commit: {commit_info}
        """

def add_source_numbers(lst, source_name = "Source", use_source = True):
    if use_source:
        return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)]
    else:
        return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)]

def add_details(lst):
    nodes = []
    for index, txt in enumerate(lst):
        brief = txt[:25].replace("\n", "")
        nodes.append(
            f"<details><summary>{brief}...</summary><p>{txt}</p></details>"
        )
    return nodes


def sheet_to_string(sheet):
    result = ""
    for index, row in sheet.iterrows():
        row_string = ""
        for column in sheet.columns:
            row_string += f"{column}: {row[column]}, "
        row_string = row_string.rstrip(", ")
        row_string += "."
        result += row_string + "\n"
    return result

def excel_to_string(file_path):
    # read all worksheets from the Excel file
    excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None)

    # initialize the result string
    result = ""

    # iterate over each worksheet
    for sheet_name, sheet_data in excel_file.items():
        # add the worksheet name to the result string
        result += f"Sheet: {sheet_name}\n"

        # process the current worksheet and append it to the result
        result += sheet_to_string(sheet_data)

        # add a separator between worksheets
        result += "\n" + ("-" * 20) + "\n\n"

    return result
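
For a sense of what `count_token` above measures, a minimal standalone check (the message dict mirrors the shape the utility expects):

```python
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
message = {"role": "user", "content": "Hello!"}
input_str = f"role: {message['role']}, content: {message['content']}"
print(len(encoding.encode(input_str)))  # token count for the serialized message
```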
spaces/Armandoliv/cars-parts-segmentation-resnet18/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Cars Parts Segmentation Resnet18
emoji: 💩
colorFrom: red
colorTo: green
sdk: gradio
sdk_version: 3.3
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/slconfig.py
DELETED
@@ -1,427 +0,0 @@
-# ==========================================================
-# Modified from mmcv
-# ==========================================================
-import ast
-import os
-import os.path as osp
-import shutil
-import sys
-import tempfile
-from argparse import Action
-from importlib import import_module
-
-from addict import Dict
-from yapf.yapflib.yapf_api import FormatCode
-
-BASE_KEY = "_base_"
-DELETE_KEY = "_delete_"
-RESERVED_KEYS = ["filename", "text", "pretty_text", "get", "dump", "merge_from_dict"]
-
-
-def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
-    if not osp.isfile(filename):
-        raise FileNotFoundError(msg_tmpl.format(filename))
-
-
-class ConfigDict(Dict):
-    def __missing__(self, name):
-        raise KeyError(name)
-
-    def __getattr__(self, name):
-        try:
-            value = super(ConfigDict, self).__getattr__(name)
-        except KeyError:
-            ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'")
-        except Exception as e:
-            ex = e
-        else:
-            return value
-        raise ex
-
-
-class SLConfig(object):
-    """
-    config files.
-    only support .py file as config now.
-
-    ref: mmcv.utils.config
-
-    Example:
-        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
-        >>> cfg.a
-        1
-        >>> cfg.b
-        {'b1': [0, 1]}
-        >>> cfg.b.b1
-        [0, 1]
-        >>> cfg = Config.fromfile('tests/data/config/a.py')
-        >>> cfg.filename
-        "/home/kchen/projects/mmcv/tests/data/config/a.py"
-        >>> cfg.item4
-        'test'
-        >>> cfg
-        "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
-        "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
-    """
-
-    @staticmethod
-    def _validate_py_syntax(filename):
-        with open(filename) as f:
-            content = f.read()
-        try:
-            ast.parse(content)
-        except SyntaxError:
-            raise SyntaxError("There are syntax errors in config " f"file {filename}")
-
-    @staticmethod
-    def _file2dict(filename):
-        filename = osp.abspath(osp.expanduser(filename))
-        check_file_exist(filename)
-        if filename.lower().endswith(".py"):
-            with tempfile.TemporaryDirectory() as temp_config_dir:
-                temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=".py")
-                temp_config_name = osp.basename(temp_config_file.name)
-                if os.name == 'nt':
-                    temp_config_file.close()
-                shutil.copyfile(filename, osp.join(temp_config_dir, temp_config_name))
-                temp_module_name = osp.splitext(temp_config_name)[0]
-                sys.path.insert(0, temp_config_dir)
-                SLConfig._validate_py_syntax(filename)
-                mod = import_module(temp_module_name)
-                sys.path.pop(0)
-                cfg_dict = {
-                    name: value for name, value in mod.__dict__.items() if not name.startswith("__")
-                }
-                # delete imported module
-                del sys.modules[temp_module_name]
-                # close temp file
-                temp_config_file.close()
-        elif filename.lower().endswith((".yml", ".yaml", ".json")):
-            from .slio import slload
-
-            cfg_dict = slload(filename)
-        else:
-            raise IOError("Only py/yml/yaml/json type are supported now!")
-
-        cfg_text = filename + "\n"
-        with open(filename, "r") as f:
-            cfg_text += f.read()
-
-        # parse the base file
-        if BASE_KEY in cfg_dict:
-            cfg_dir = osp.dirname(filename)
-            base_filename = cfg_dict.pop(BASE_KEY)
-            base_filename = base_filename if isinstance(base_filename, list) else [base_filename]
-
-            cfg_dict_list = list()
-            cfg_text_list = list()
-            for f in base_filename:
-                _cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f))
-                cfg_dict_list.append(_cfg_dict)
-                cfg_text_list.append(_cfg_text)
-
-            base_cfg_dict = dict()
-            for c in cfg_dict_list:
-                if len(base_cfg_dict.keys() & c.keys()) > 0:
-                    raise KeyError("Duplicate key is not allowed among bases")
-                # TODO Allow the duplicate key while warnning user
-                base_cfg_dict.update(c)
-
-            base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict)
-            cfg_dict = base_cfg_dict
-
-            # merge cfg_text
-            cfg_text_list.append(cfg_text)
-            cfg_text = "\n".join(cfg_text_list)
-
-        return cfg_dict, cfg_text
-
-    @staticmethod
-    def _merge_a_into_b(a, b):
-        """merge dict `a` into dict `b` (non-inplace).
-        values in `a` will overwrite `b`.
-        copy first to avoid inplace modification
-
-        Args:
-            a ([type]): [description]
-            b ([type]): [description]
-
-        Returns:
-            [dict]: [description]
-        """
-        # import ipdb; ipdb.set_trace()
-        if not isinstance(a, dict):
-            return a
-
-        b = b.copy()
-        for k, v in a.items():
-            if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):
-
-                if not isinstance(b[k], dict) and not isinstance(b[k], list):
-                    # if :
-                    # import ipdb; ipdb.set_trace()
-                    raise TypeError(
-                        f"{k}={v} in child config cannot inherit from base "
-                        f"because {k} is a dict in the child config but is of "
-                        f"type {type(b[k])} in base config. You may set "
-                        f"`{DELETE_KEY}=True` to ignore the base config"
-                    )
-                b[k] = SLConfig._merge_a_into_b(v, b[k])
-            elif isinstance(b, list):
-                try:
-                    _ = int(k)
-                except:
-                    raise TypeError(
-                        f"b is a list, " f"index {k} should be an int when input but {type(k)}"
-                    )
-                b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)])
-            else:
-                b[k] = v
-
-        return b
-
-    @staticmethod
-    def fromfile(filename):
-        cfg_dict, cfg_text = SLConfig._file2dict(filename)
-        return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename)
-
-    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
-        if cfg_dict is None:
-            cfg_dict = dict()
-        elif not isinstance(cfg_dict, dict):
-            raise TypeError("cfg_dict must be a dict, but " f"got {type(cfg_dict)}")
-        for key in cfg_dict:
-            if key in RESERVED_KEYS:
-                raise KeyError(f"{key} is reserved for config file")
-
-        super(SLConfig, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
-        super(SLConfig, self).__setattr__("_filename", filename)
-        if cfg_text:
-            text = cfg_text
-        elif filename:
-            with open(filename, "r") as f:
-                text = f.read()
-        else:
-            text = ""
-        super(SLConfig, self).__setattr__("_text", text)
-
-    @property
-    def filename(self):
-        return self._filename
-
-    @property
-    def text(self):
-        return self._text
-
-    @property
-    def pretty_text(self):
-
-        indent = 4
-
-        def _indent(s_, num_spaces):
-            s = s_.split("\n")
-            if len(s) == 1:
-                return s_
-            first = s.pop(0)
-            s = [(num_spaces * " ") + line for line in s]
-            s = "\n".join(s)
-            s = first + "\n" + s
-            return s
-
-        def _format_basic_types(k, v, use_mapping=False):
-            if isinstance(v, str):
-                v_str = f"'{v}'"
-            else:
-                v_str = str(v)
-
-            if use_mapping:
-                k_str = f"'{k}'" if isinstance(k, str) else str(k)
-                attr_str = f"{k_str}: {v_str}"
-            else:
-                attr_str = f"{str(k)}={v_str}"
-            attr_str = _indent(attr_str, indent)
-
-            return attr_str
-
-        def _format_list(k, v, use_mapping=False):
-            # check if all items in the list are dict
-            if all(isinstance(_, dict) for _ in v):
-                v_str = "[\n"
-                v_str += "\n".join(
-                    f"dict({_indent(_format_dict(v_), indent)})," for v_ in v
-                ).rstrip(",")
-                if use_mapping:
-                    k_str = f"'{k}'" if isinstance(k, str) else str(k)
-                    attr_str = f"{k_str}: {v_str}"
-                else:
-                    attr_str = f"{str(k)}={v_str}"
-                attr_str = _indent(attr_str, indent) + "]"
-            else:
-                attr_str = _format_basic_types(k, v, use_mapping)
-            return attr_str
-
-        def _contain_invalid_identifier(dict_str):
-            contain_invalid_identifier = False
-            for key_name in dict_str:
-                contain_invalid_identifier |= not str(key_name).isidentifier()
-            return contain_invalid_identifier
-
-        def _format_dict(input_dict, outest_level=False):
-            r = ""
-            s = []
-
-            use_mapping = _contain_invalid_identifier(input_dict)
-            if use_mapping:
-                r += "{"
-            for idx, (k, v) in enumerate(input_dict.items()):
-                is_last = idx >= len(input_dict) - 1
-                end = "" if outest_level or is_last else ","
-                if isinstance(v, dict):
-                    v_str = "\n" + _format_dict(v)
-                    if use_mapping:
-                        k_str = f"'{k}'" if isinstance(k, str) else str(k)
-                        attr_str = f"{k_str}: dict({v_str}"
-                    else:
-                        attr_str = f"{str(k)}=dict({v_str}"
-                    attr_str = _indent(attr_str, indent) + ")" + end
-                elif isinstance(v, list):
-                    attr_str = _format_list(k, v, use_mapping) + end
-                else:
-                    attr_str = _format_basic_types(k, v, use_mapping) + end
-
-                s.append(attr_str)
-            r += "\n".join(s)
-            if use_mapping:
-                r += "}"
-            return r
-
-        cfg_dict = self._cfg_dict.to_dict()
-        text = _format_dict(cfg_dict, outest_level=True)
-        # copied from setup.cfg
-        yapf_style = dict(
-            based_on_style="pep8",
-            blank_line_before_nested_class_or_def=True,
-            split_before_expression_after_opening_paren=True,
-        )
-        text, _ = FormatCode(text, style_config=yapf_style, verify=True)
-
-        return text
-
-    def __repr__(self):
-        return f"Config (path: {self.filename}): {self._cfg_dict.__repr__()}"
-
-    def __len__(self):
-        return len(self._cfg_dict)
-
-    def __getattr__(self, name):
-        # # debug
-        # print('+'*15)
-        # print('name=%s' % name)
-        # print("addr:", id(self))
-        # # print('type(self):', type(self))
-        # print(self.__dict__)
-        # print('+'*15)
-        # if self.__dict__ == {}:
-        #     raise ValueError
-
-        return getattr(self._cfg_dict, name)
-
-    def __getitem__(self, name):
-        return self._cfg_dict.__getitem__(name)
-
-    def __setattr__(self, name, value):
-        if isinstance(value, dict):
-            value = ConfigDict(value)
-        self._cfg_dict.__setattr__(name, value)
-
-    def __setitem__(self, name, value):
-        if isinstance(value, dict):
-            value = ConfigDict(value)
-        self._cfg_dict.__setitem__(name, value)
-
-    def __iter__(self):
-        return iter(self._cfg_dict)
-
-    def dump(self, file=None):
-        # import ipdb; ipdb.set_trace()
-        if file is None:
-            return self.pretty_text
-        else:
-            with open(file, "w") as f:
-                f.write(self.pretty_text)
-
-    def merge_from_dict(self, options):
-        """Merge list into cfg_dict
-
-        Merge the dict parsed by MultipleKVAction into this cfg.
-
-        Examples:
-            >>> options = {'model.backbone.depth': 50,
-            ...            'model.backbone.with_cp':True}
-            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
-            >>> cfg.merge_from_dict(options)
-            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
-            >>> assert cfg_dict == dict(
-            ...     model=dict(backbone=dict(depth=50, with_cp=True)))
-
-        Args:
-            options (dict): dict of configs to merge from.
-        """
-        option_cfg_dict = {}
-        for full_key, v in options.items():
-            d = option_cfg_dict
-            key_list = full_key.split(".")
-            for subkey in key_list[:-1]:
-                d.setdefault(subkey, ConfigDict())
-                d = d[subkey]
-            subkey = key_list[-1]
-            d[subkey] = v
-
-        cfg_dict = super(SLConfig, self).__getattribute__("_cfg_dict")
-        super(SLConfig, self).__setattr__(
-            "_cfg_dict", SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict)
-        )
-
-    # for multiprocess
-    def __setstate__(self, state):
-        self.__init__(state)
-
-    def copy(self):
-        return SLConfig(self._cfg_dict.copy())
-
-    def deepcopy(self):
-        return SLConfig(self._cfg_dict.deepcopy())
-
-
-class DictAction(Action):
-    """
-    argparse action to split an argument into KEY=VALUE form
-    on the first = and append to a dictionary. List options should
-    be passed as comma separated values, i.e KEY=V1,V2,V3
-    """
-
-    @staticmethod
-    def _parse_int_float_bool(val):
-        try:
-            return int(val)
-        except ValueError:
-            pass
-        try:
-            return float(val)
-        except ValueError:
-            pass
-        if val.lower() in ["true", "false"]:
-            return True if val.lower() == "true" else False
-        if val.lower() in ["none", "null"]:
-            return None
-        return val
-
-    def __call__(self, parser, namespace, values, option_string=None):
-        options = {}
-        for kv in values:
-            key, val = kv.split("=", maxsplit=1)
-            val = [self._parse_int_float_bool(v) for v in val.split(",")]
-            if len(val) == 1:
-                val = val[0]
-            options[key] = val
-        setattr(namespace, self.dest, options)

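For orientation (not part of the diff itself): a minimal sketch of how the SLConfig loader deleted above was typically consumed. The config file name and its keys below are hypothetical, invented purely for illustration.

# Hypothetical usage of the deleted SLConfig class; "my_cfg.py" and its
# keys (modelname, hidden_dim) are made up for this example.
from groundingdino.util.slconfig import SLConfig

cfg = SLConfig.fromfile("my_cfg.py")        # imports the .py file and captures its globals
print(cfg.modelname, cfg.hidden_dim)        # attribute access proxied through ConfigDict
cfg.merge_from_dict({"train.lr": 1e-4})     # dotted keys create nested ConfigDicts
print(cfg.dump())                           # yapf-formatted pretty text
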
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/utils.py
DELETED
@@ -1,610 +0,0 @@
-import argparse
-import json
-import warnings
-from collections import OrderedDict
-from copy import deepcopy
-from typing import Any, Dict, List
-
-import numpy as np
-import torch
-from transformers import AutoTokenizer
-
-from groundingdino.util.slconfig import SLConfig
-
-
-def slprint(x, name="x"):
-    if isinstance(x, (torch.Tensor, np.ndarray)):
-        print(f"{name}.shape:", x.shape)
-    elif isinstance(x, (tuple, list)):
-        print("type x:", type(x))
-        for i in range(min(10, len(x))):
-            slprint(x[i], f"{name}[{i}]")
-    elif isinstance(x, dict):
-        for k, v in x.items():
-            slprint(v, f"{name}[{k}]")
-    else:
-        print(f"{name}.type:", type(x))
-
-
-def clean_state_dict(state_dict):
-    new_state_dict = OrderedDict()
-    for k, v in state_dict.items():
-        if k[:7] == "module.":
-            k = k[7:]  # remove `module.`
-        new_state_dict[k] = v
-    return new_state_dict
-
-
-def renorm(
-    img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-) -> torch.FloatTensor:
-    # img: tensor(3,H,W) or tensor(B,3,H,W)
-    # return: same as img
-    assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
-    if img.dim() == 3:
-        assert img.size(0) == 3, 'img.size(0) shoule be 3 but "%d". (%s)' % (
-            img.size(0),
-            str(img.size()),
-        )
-        img_perm = img.permute(1, 2, 0)
-        mean = torch.Tensor(mean)
-        std = torch.Tensor(std)
-        img_res = img_perm * std + mean
-        return img_res.permute(2, 0, 1)
-    else:  # img.dim() == 4
-        assert img.size(1) == 3, 'img.size(1) shoule be 3 but "%d". (%s)' % (
-            img.size(1),
-            str(img.size()),
-        )
-        img_perm = img.permute(0, 2, 3, 1)
-        mean = torch.Tensor(mean)
-        std = torch.Tensor(std)
-        img_res = img_perm * std + mean
-        return img_res.permute(0, 3, 1, 2)
-
-
-class CocoClassMapper:
-    def __init__(self) -> None:
-        self.category_map_str = {
-            "1": 1,
-            "2": 2,
-            "3": 3,
-            "4": 4,
-            "5": 5,
-            "6": 6,
-            "7": 7,
-            "8": 8,
-            "9": 9,
-            "10": 10,
-            "11": 11,
-            "13": 12,
-            "14": 13,
-            "15": 14,
-            "16": 15,
-            "17": 16,
-            "18": 17,
-            "19": 18,
-            "20": 19,
-            "21": 20,
-            "22": 21,
-            "23": 22,
-            "24": 23,
-            "25": 24,
-            "27": 25,
-            "28": 26,
-            "31": 27,
-            "32": 28,
-            "33": 29,
-            "34": 30,
-            "35": 31,
-            "36": 32,
-            "37": 33,
-            "38": 34,
-            "39": 35,
-            "40": 36,
-            "41": 37,
-            "42": 38,
-            "43": 39,
-            "44": 40,
-            "46": 41,
-            "47": 42,
-            "48": 43,
-            "49": 44,
-            "50": 45,
-            "51": 46,
-            "52": 47,
-            "53": 48,
-            "54": 49,
-            "55": 50,
-            "56": 51,
-            "57": 52,
-            "58": 53,
-            "59": 54,
-            "60": 55,
-            "61": 56,
-            "62": 57,
-            "63": 58,
-            "64": 59,
-            "65": 60,
-            "67": 61,
-            "70": 62,
-            "72": 63,
-            "73": 64,
-            "74": 65,
-            "75": 66,
-            "76": 67,
-            "77": 68,
-            "78": 69,
-            "79": 70,
-            "80": 71,
-            "81": 72,
-            "82": 73,
-            "84": 74,
-            "85": 75,
-            "86": 76,
-            "87": 77,
-            "88": 78,
-            "89": 79,
-            "90": 80,
-        }
-        self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()}
-        self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()}
-
-    def origin2compact(self, idx):
-        return self.origin2compact_mapper[int(idx)]
-
-    def compact2origin(self, idx):
-        return self.compact2origin_mapper[int(idx)]
-
-
-def to_device(item, device):
-    if isinstance(item, torch.Tensor):
-        return item.to(device)
-    elif isinstance(item, list):
-        return [to_device(i, device) for i in item]
-    elif isinstance(item, dict):
-        return {k: to_device(v, device) for k, v in item.items()}
-    else:
-        raise NotImplementedError(
-            "Call Shilong if you use other containers! type: {}".format(type(item))
-        )
-
-
-#
-def get_gaussian_mean(x, axis, other_axis, softmax=True):
-    """
-
-    Args:
-        x (float): Input images(BxCxHxW)
-        axis (int): The index for weighted mean
-        other_axis (int): The other index
-
-    Returns: weighted index for axis, BxC
-
-    """
-    mat2line = torch.sum(x, axis=other_axis)
-    # mat2line = mat2line / mat2line.mean() * 10
-    if softmax:
-        u = torch.softmax(mat2line, axis=2)
-    else:
-        u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)
-    size = x.shape[axis]
-    ind = torch.linspace(0, 1, size).to(x.device)
-    batch = x.shape[0]
-    channel = x.shape[1]
-    index = ind.repeat([batch, channel, 1])
-    mean_position = torch.sum(index * u, dim=2)
-    return mean_position
-
-
-def get_expected_points_from_map(hm, softmax=True):
-    """get_gaussian_map_from_points
-        B,C,H,W -> B,N,2 float(0, 1) float(0, 1)
-        softargmax function
-
-    Args:
-        hm (float): Input images(BxCxHxW)
-
-    Returns:
-        weighted index for axis, BxCx2. float between 0 and 1.
-
-    """
-    # hm = 10*hm
-    B, C, H, W = hm.shape
-    y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax)  # B,C
-    x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax)  # B,C
-    # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2)
-    return torch.stack([x_mean, y_mean], dim=2)
-
-
-# Positional encoding (section 5.1)
-# borrow from nerf
-class Embedder:
-    def __init__(self, **kwargs):
-        self.kwargs = kwargs
-        self.create_embedding_fn()
-
-    def create_embedding_fn(self):
-        embed_fns = []
-        d = self.kwargs["input_dims"]
-        out_dim = 0
-        if self.kwargs["include_input"]:
-            embed_fns.append(lambda x: x)
-            out_dim += d
-
-        max_freq = self.kwargs["max_freq_log2"]
-        N_freqs = self.kwargs["num_freqs"]
-
-        if self.kwargs["log_sampling"]:
-            freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs)
-        else:
-            freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs)
-
-        for freq in freq_bands:
-            for p_fn in self.kwargs["periodic_fns"]:
-                embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
-                out_dim += d
-
-        self.embed_fns = embed_fns
-        self.out_dim = out_dim
-
-    def embed(self, inputs):
-        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
-
-
-def get_embedder(multires, i=0):
-    import torch.nn as nn
-
-    if i == -1:
-        return nn.Identity(), 3
-
-    embed_kwargs = {
-        "include_input": True,
-        "input_dims": 3,
-        "max_freq_log2": multires - 1,
-        "num_freqs": multires,
-        "log_sampling": True,
-        "periodic_fns": [torch.sin, torch.cos],
-    }
-
-    embedder_obj = Embedder(**embed_kwargs)
-    embed = lambda x, eo=embedder_obj: eo.embed(x)
-    return embed, embedder_obj.out_dim
-
-
-class APOPMeter:
-    def __init__(self) -> None:
-        self.tp = 0
-        self.fp = 0
-        self.tn = 0
-        self.fn = 0
-
-    def update(self, pred, gt):
-        """
-        Input:
-            pred, gt: Tensor()
-        """
-        assert pred.shape == gt.shape
-        self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()
-        self.fp += torch.logical_and(pred == 1, gt == 0).sum().item()
-        self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()
-        self.tn += torch.logical_and(pred == 1, gt == 0).sum().item()
-
-    def update_cm(self, tp, fp, tn, fn):
-        self.tp += tp
-        self.fp += fp
-        self.tn += tn
-        self.tn += fn
-
-
-def inverse_sigmoid(x, eps=1e-5):
-    x = x.clamp(min=0, max=1)
-    x1 = x.clamp(min=eps)
-    x2 = (1 - x).clamp(min=eps)
-    return torch.log(x1 / x2)
-
-
-def get_raw_dict(args):
-    """
-    return the dicf contained in args.
-
-    e.g:
-        >>> with open(path, 'w') as f:
-                json.dump(get_raw_dict(args), f, indent=2)
-    """
-    if isinstance(args, argparse.Namespace):
-        return vars(args)
-    elif isinstance(args, dict):
-        return args
-    elif isinstance(args, SLConfig):
-        return args._cfg_dict
-    else:
-        raise NotImplementedError("Unknown type {}".format(type(args)))
-
-
-def stat_tensors(tensor):
-    assert tensor.dim() == 1
-    tensor_sm = tensor.softmax(0)
-    entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum()
-
-    return {
-        "max": tensor.max(),
-        "min": tensor.min(),
-        "mean": tensor.mean(),
-        "var": tensor.var(),
-        "std": tensor.var() ** 0.5,
-        "entropy": entropy,
-    }
-
-
-class NiceRepr:
-    """Inherit from this class and define ``__nice__`` to "nicely" print your
-    objects.
-
-    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
-    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
-    If the inheriting class has a ``__len__``, method then the default
-    ``__nice__`` method will return its length.
-
-    Example:
-        >>> class Foo(NiceRepr):
-        ...    def __nice__(self):
-        ...        return 'info'
-        >>> foo = Foo()
-        >>> assert str(foo) == '<Foo(info)>'
-        >>> assert repr(foo).startswith('<Foo(info) at ')
-
-    Example:
-        >>> class Bar(NiceRepr):
-        ...    pass
-        >>> bar = Bar()
-        >>> import pytest
-        >>> with pytest.warns(None) as record:
-        >>>     assert 'object at' in str(bar)
-        >>>     assert 'object at' in repr(bar)
-
-    Example:
-        >>> class Baz(NiceRepr):
-        ...    def __len__(self):
-        ...        return 5
-        >>> baz = Baz()
-        >>> assert str(baz) == '<Baz(5)>'
-    """
-
-    def __nice__(self):
-        """str: a "nice" summary string describing this module"""
-        if hasattr(self, "__len__"):
-            # It is a common pattern for objects to use __len__ in __nice__
-            # As a convenience we define a default __nice__ for these objects
-            return str(len(self))
-        else:
-            # In all other cases force the subclass to overload __nice__
-            raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}")
-
-    def __repr__(self):
-        """str: the string of the module"""
-        try:
-            nice = self.__nice__()
-            classname = self.__class__.__name__
-            return f"<{classname}({nice}) at {hex(id(self))}>"
-        except NotImplementedError as ex:
-            warnings.warn(str(ex), category=RuntimeWarning)
-            return object.__repr__(self)
-
-    def __str__(self):
-        """str: the string of the module"""
-        try:
-            classname = self.__class__.__name__
-            nice = self.__nice__()
-            return f"<{classname}({nice})>"
-        except NotImplementedError as ex:
-            warnings.warn(str(ex), category=RuntimeWarning)
-            return object.__repr__(self)
-
-
-def ensure_rng(rng=None):
-    """Coerces input into a random number generator.
-
-    If the input is None, then a global random state is returned.
-
-    If the input is a numeric value, then that is used as a seed to construct a
-    random state. Otherwise the input is returned as-is.
-
-    Adapted from [1]_.
-
-    Args:
-        rng (int | numpy.random.RandomState | None):
-            if None, then defaults to the global rng. Otherwise this can be an
-            integer or a RandomState class
-    Returns:
-        (numpy.random.RandomState) : rng -
-            a numpy random number generator
-
-    References:
-        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501
-    """
-
-    if rng is None:
-        rng = np.random.mtrand._rand
-    elif isinstance(rng, int):
-        rng = np.random.RandomState(rng)
-    else:
-        rng = rng
-    return rng
-
-
-def random_boxes(num=1, scale=1, rng=None):
-    """Simple version of ``kwimage.Boxes.random``
-
-    Returns:
-        Tensor: shape (n, 4) in x1, y1, x2, y2 format.
-
-    References:
-        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
-
-    Example:
-        >>> num = 3
-        >>> scale = 512
-        >>> rng = 0
-        >>> boxes = random_boxes(num, scale, rng)
-        >>> print(boxes)
-        tensor([[280.9925, 278.9802, 308.6148, 366.1769],
-                [216.9113, 330.6978, 224.0446, 456.5878],
-                [405.3632, 196.3221, 493.3953, 270.7942]])
-    """
-    rng = ensure_rng(rng)
-
-    tlbr = rng.rand(num, 4).astype(np.float32)
-
-    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
-    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
-    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
-    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
-
-    tlbr[:, 0] = tl_x * scale
-    tlbr[:, 1] = tl_y * scale
-    tlbr[:, 2] = br_x * scale
-    tlbr[:, 3] = br_y * scale
-
-    boxes = torch.from_numpy(tlbr)
-    return boxes
-
-
-class ModelEma(torch.nn.Module):
-    def __init__(self, model, decay=0.9997, device=None):
-        super(ModelEma, self).__init__()
-        # make a copy of the model for accumulating moving average of weights
-        self.module = deepcopy(model)
-        self.module.eval()
-
-        # import ipdb; ipdb.set_trace()
-
-        self.decay = decay
-        self.device = device  # perform ema on different device from model if set
-        if self.device is not None:
-            self.module.to(device=device)
-
-    def _update(self, model, update_fn):
-        with torch.no_grad():
-            for ema_v, model_v in zip(
-                self.module.state_dict().values(), model.state_dict().values()
-            ):
-                if self.device is not None:
-                    model_v = model_v.to(device=self.device)
-                ema_v.copy_(update_fn(ema_v, model_v))
-
-    def update(self, model):
-        self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m)
-
-    def set(self, model):
-        self._update(model, update_fn=lambda e, m: m)
-
-
-class BestMetricSingle:
-    def __init__(self, init_res=0.0, better="large") -> None:
-        self.init_res = init_res
-        self.best_res = init_res
-        self.best_ep = -1
-
-        self.better = better
-        assert better in ["large", "small"]
-
-    def isbetter(self, new_res, old_res):
-        if self.better == "large":
-            return new_res > old_res
-        if self.better == "small":
-            return new_res < old_res
-
-    def update(self, new_res, ep):
-        if self.isbetter(new_res, self.best_res):
-            self.best_res = new_res
-            self.best_ep = ep
-            return True
-        return False
-
-    def __str__(self) -> str:
-        return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep)
-
-    def __repr__(self) -> str:
-        return self.__str__()
-
-    def summary(self) -> dict:
-        return {
-            "best_res": self.best_res,
-            "best_ep": self.best_ep,
-        }
-
-
-class BestMetricHolder:
-    def __init__(self, init_res=0.0, better="large", use_ema=False) -> None:
-        self.best_all = BestMetricSingle(init_res, better)
-        self.use_ema = use_ema
-        if use_ema:
-            self.best_ema = BestMetricSingle(init_res, better)
-            self.best_regular = BestMetricSingle(init_res, better)
-
-    def update(self, new_res, epoch, is_ema=False):
-        """
-        return if the results is the best.
-        """
-        if not self.use_ema:
-            return self.best_all.update(new_res, epoch)
-        else:
-            if is_ema:
-                self.best_ema.update(new_res, epoch)
-                return self.best_all.update(new_res, epoch)
-            else:
-                self.best_regular.update(new_res, epoch)
-                return self.best_all.update(new_res, epoch)
-
-    def summary(self):
-        if not self.use_ema:
-            return self.best_all.summary()
-
-        res = {}
-        res.update({f"all_{k}": v for k, v in self.best_all.summary().items()})
-        res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()})
-        res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()})
-        return res
-
-    def __repr__(self) -> str:
-        return json.dumps(self.summary(), indent=2)
-
-    def __str__(self) -> str:
-        return self.__repr__()
-
-
-def targets_to(targets: List[Dict[str, Any]], device):
-    """Moves the target dicts to the given device."""
-    excluded_keys = [
-        "questionId",
-        "tokens_positive",
-        "strings_positive",
-        "tokens",
-        "dataset_name",
-        "sentence_id",
-        "original_img_id",
-        "nb_eval",
-        "task_id",
-        "original_id",
-        "token_span",
-        "caption",
-        "dataset_type",
-    ]
-    return [
-        {k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets
-    ]
-
-
-def get_phrases_from_posmap(
-    posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255
-):
-    assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
-    if posmap.dim() == 1:
-        posmap[0: left_idx + 1] = False
-        posmap[right_idx:] = False
-        non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
-        token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
-        return tokenizer.decode(token_ids)
-    else:
-        raise NotImplementedError("posmap must be 1-dim")

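One step in the file above that may be worth unpacking is the soft-argmax pair get_gaussian_mean / get_expected_points_from_map: keypoint coordinates are read off a heatmap as the softmax-weighted average of normalized pixel indices. A minimal sketch (not part of the diff; the heatmap below is synthetic):

import torch

# Synthetic batch: 2 images, 1 channel, 8x8 heatmaps, one sharp peak each.
hm = torch.zeros(2, 1, 8, 8)
hm[0, 0, 2, 5] = 50.0   # peak at row 2, col 5
hm[1, 0, 6, 1] = 50.0   # peak at row 6, col 1

pts = get_expected_points_from_map(hm)  # shape (2, 1, 2): (x, y) in [0, 1]
# With a sharp peak the soft-argmax approaches the hard argmax:
# pts[0, 0] is close to (5/7, 2/7) and pts[1, 0] to (1/7, 6/7).
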
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/common.py
DELETED
@@ -1,424 +0,0 @@
-# common.py
-from .core import *
-from .helpers import delimited_list, any_open_tag, any_close_tag
-from datetime import datetime
-
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
-    """Here are some common low-level expressions that may be useful in
-    jump-starting parser development:
-
-     - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
-       :class:`scientific notation<sci_real>`)
-     - common :class:`programming identifiers<identifier>`
-     - network addresses (:class:`MAC<mac_address>`,
-       :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
-     - ISO8601 :class:`dates<iso8601_date>` and
-       :class:`datetime<iso8601_datetime>`
-     - :class:`UUID<uuid>`
-     - :class:`comma-separated list<comma_separated_list>`
-     - :class:`url`
-
-    Parse actions:
-
-     - :class:`convertToInteger`
-     - :class:`convertToFloat`
-     - :class:`convertToDate`
-     - :class:`convertToDatetime`
-     - :class:`stripHTMLTags`
-     - :class:`upcaseTokens`
-     - :class:`downcaseTokens`
-
-    Example::
-
-        pyparsing_common.number.runTests('''
-            # any int or real number, returned as the appropriate type
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.fnumber.runTests('''
-            # any int or real number, returned as float
-            100
-            -100
-            +100
-            3.14159
-            6.02e23
-            1e-12
-            ''')
-
-        pyparsing_common.hex_integer.runTests('''
-            # hex numbers
-            100
-            FF
-            ''')
-
-        pyparsing_common.fraction.runTests('''
-            # fractions
-            1/2
-            -3/4
-            ''')
-
-        pyparsing_common.mixed_integer.runTests('''
-            # mixed fractions
-            1
-            1/2
-            -3/4
-            1-3/4
-            ''')
-
-        import uuid
-        pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
-        pyparsing_common.uuid.runTests('''
-            # uuid
-            12345678-1234-5678-1234-567812345678
-            ''')
-
-    prints::
-
-        # any int or real number, returned as the appropriate type
-        100
-        [100]
-
-        -100
-        [-100]
-
-        +100
-        [100]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # any int or real number, returned as float
-        100
-        [100.0]
-
-        -100
-        [-100.0]
-
-        +100
-        [100.0]
-
-        3.14159
-        [3.14159]
-
-        6.02e23
-        [6.02e+23]
-
-        1e-12
-        [1e-12]
-
-        # hex numbers
-        100
-        [256]
-
-        FF
-        [255]
-
-        # fractions
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        # mixed fractions
-        1
-        [1]
-
-        1/2
-        [0.5]
-
-        -3/4
-        [-0.75]
-
-        1-3/4
-        [1.75]
-
-        # uuid
-        12345678-1234-5678-1234-567812345678
-        [UUID('12345678-1234-5678-1234-567812345678')]
-    """
-
-    convert_to_integer = token_map(int)
-    """
-    Parse action for converting parsed integers to Python int
-    """
-
-    convert_to_float = token_map(float)
-    """
-    Parse action for converting parsed numbers to Python float
-    """
-
-    integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
-    """expression that parses an unsigned integer, returns an int"""
-
-    hex_integer = (
-        Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
-    )
-    """expression that parses a hexadecimal integer, returns an int"""
-
-    signed_integer = (
-        Regex(r"[+-]?\d+")
-        .set_name("signed integer")
-        .set_parse_action(convert_to_integer)
-    )
-    """expression that parses an integer with optional leading sign, returns an int"""
-
-    fraction = (
-        signed_integer().set_parse_action(convert_to_float)
-        + "/"
-        + signed_integer().set_parse_action(convert_to_float)
-    ).set_name("fraction")
-    """fractional expression of an integer divided by an integer, returns a float"""
-    fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
-
-    mixed_integer = (
-        fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
-    ).set_name("fraction or mixed integer-fraction")
-    """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
-    mixed_integer.add_parse_action(sum)
-
-    real = (
-        Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
-        .set_name("real number")
-        .set_parse_action(convert_to_float)
-    )
-    """expression that parses a floating point number and returns a float"""
-
-    sci_real = (
-        Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
-        .set_name("real number with scientific notation")
-        .set_parse_action(convert_to_float)
-    )
-    """expression that parses a floating point number with optional
-    scientific notation and returns a float"""
-
-    # streamlining this expression makes the docs nicer-looking
-    number = (sci_real | real | signed_integer).setName("number").streamline()
-    """any numeric expression, returns the corresponding Python type"""
-
-    fnumber = (
-        Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
-        .set_name("fnumber")
-        .set_parse_action(convert_to_float)
-    )
-    """any int or real number, returned as float"""
-
-    identifier = Word(identchars, identbodychars).set_name("identifier")
-    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
-
-    ipv4_address = Regex(
-        r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
-    ).set_name("IPv4 address")
-    "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
-
-    _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
-    _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
-        "full IPv6 address"
-    )
-    _short_ipv6_address = (
-        Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
-        + "::"
-        + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
-    ).set_name("short IPv6 address")
-    _short_ipv6_address.add_condition(
-        lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
-    )
-    _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
-    ipv6_address = Combine(
-        (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
-            "IPv6 address"
-        )
-    ).set_name("IPv6 address")
-    "IPv6 address (long, short, or mixed form)"
-
-    mac_address = Regex(
-        r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
-    ).set_name("MAC address")
-    "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
-
-    @staticmethod
-    def convert_to_date(fmt: str = "%Y-%m-%d"):
-        """
-        Helper to create a parse action for converting parsed date string to Python datetime.date
-
-        Params -
-        - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
-
-        Example::
-
-            date_expr = pyparsing_common.iso8601_date.copy()
-            date_expr.setParseAction(pyparsing_common.convertToDate())
-            print(date_expr.parseString("1999-12-31"))
-
-        prints::
-
-            [datetime.date(1999, 12, 31)]
-        """
-
-        def cvt_fn(ss, ll, tt):
-            try:
-                return datetime.strptime(tt[0], fmt).date()
-            except ValueError as ve:
-                raise ParseException(ss, ll, str(ve))
-
-        return cvt_fn
-
-    @staticmethod
-    def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
-        """Helper to create a parse action for converting parsed
-        datetime string to Python datetime.datetime
-
-        Params -
-        - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
-
-        Example::
-
-            dt_expr = pyparsing_common.iso8601_datetime.copy()
-            dt_expr.setParseAction(pyparsing_common.convertToDatetime())
-            print(dt_expr.parseString("1999-12-31T23:59:59.999"))
-
-        prints::
-
-            [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
-        """
-
-        def cvt_fn(s, l, t):
-            try:
-                return datetime.strptime(t[0], fmt)
-            except ValueError as ve:
-                raise ParseException(s, l, str(ve))
-
-        return cvt_fn
-
-    iso8601_date = Regex(
-        r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
-    ).set_name("ISO8601 date")
-    "ISO8601 date (``yyyy-mm-dd``)"
-
-    iso8601_datetime = Regex(
-        r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
-    ).set_name("ISO8601 datetime")
-    "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
-
-    uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
-    "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
-
-    _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
-
-    @staticmethod
-    def strip_html_tags(s: str, l: int, tokens: ParseResults):
-        """Parse action to remove HTML tags from web page HTML source
-
-        Example::
-
-            # strip HTML links from normal text
-            text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
-            td, td_end = makeHTMLTags("TD")
-            table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
-            print(table_text.parseString(text).body)
-
-        Prints::
-
-            More info at the pyparsing wiki page
-        """
-        return pyparsing_common._html_stripper.transform_string(tokens[0])
-
-    _commasepitem = (
-        Combine(
-            OneOrMore(
-                ~Literal(",")
-                + ~LineEnd()
-                + Word(printables, exclude_chars=",")
-                + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
-            )
-        )
-        .streamline()
-        .set_name("commaItem")
-    )
-    comma_separated_list = delimited_list(
-        Opt(quoted_string.copy() | _commasepitem, default="")
-    ).set_name("comma separated list")
-    """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
-    upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
-    """Parse action to convert tokens to upper case."""
-
-    downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
-    """Parse action to convert tokens to lower case."""
-
-    # fmt: off
-    url = Regex(
-        # https://mathiasbynens.be/demo/url-regex
-        # https://gist.github.com/dperini/729294
-        r"^" +
-        # protocol identifier (optional)
-        # short syntax // still required
-        r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
-        # user:pass BasicAuth (optional)
-        r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
-        r"(?P<host>" +
-        # IP address exclusion
-        # private & local networks
-        r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
-        r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
-        r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
-        # IP address dotted notation octets
-        # excludes loopback network 0.0.0.0
-        # excludes reserved space >= 224.0.0.0
-        # excludes network & broadcast addresses
-        # (first & last IP address of each class)
-        r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
-        r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
-        r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
-        r"|" +
-        # host & domain names, may end with dot
-        # can be replaced by a shortest alternative
-        # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
-        r"(?:" +
-        r"(?:" +
-        r"[a-z0-9\u00a1-\uffff]" +
-        r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
-        r")?" +
-        r"[a-z0-9\u00a1-\uffff]\." +
-        r")+" +
-        # TLD identifier name, may end with dot
-        r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
-        r")" +
-        # port number (optional)
-        r"(:(?P<port>\d{2,5}))?" +
-        # resource path (optional)
-        r"(?P<path>\/[^?# ]*)?" +
-        # query string (optional)
-        r"(\?(?P<query>[^#]*))?" +
-        # fragment (optional)
-        r"(#(?P<fragment>\S*))?" +
-        r"$"
-    ).set_name("url")
-    # fmt: on
-
-    # pre-PEP8 compatibility names
-    convertToInteger = convert_to_integer
-    convertToFloat = convert_to_float
-    convertToDate = convert_to_date
-    convertToDatetime = convert_to_datetime
-    stripHTMLTags = strip_html_tags
-    upcaseTokens = upcase_tokens
-    downcaseTokens = downcase_tokens
-
-
-_builtin_exprs = [
-    v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
-]

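As a quick illustration (not part of the diff) of what the deleted module provides, the ready-made expressions can be exercised directly; the vendored import path shown is how pip's own code would reach the copy being removed here:

# Illustrative only: a few of the ready-made expressions defined above.
from pip._vendor.pyparsing import pyparsing_common as ppc

print(ppc.number.parse_string("6.02e23"))           # -> [6.02e+23] (float)
print(ppc.hex_integer.parse_string("FF"))           # -> [255]
print(ppc.ipv4_address.parse_string("192.0.2.1"))   # -> ['192.0.2.1']
print(ppc.iso8601_date.parse_string("1999-12-31"))  # -> ['1999-12-31']
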
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ.py
DELETED
@@ -1,14 +0,0 @@
-from .mask_rcnn_R_101_FPN_100ep_LSJ import (
-    dataloader,
-    lr_multiplier,
-    model,
-    optimizer,
-    train,
-)
-
-train.max_iter *= 4  # 100ep -> 400ep
-
-lr_multiplier.scheduler.milestones = [
-    milestone * 4 for milestone in lr_multiplier.scheduler.milestones
-]
-lr_multiplier.scheduler.num_updates = train.max_iter

spaces/BartPoint/VoiceChange_Beta/util.py
DELETED
@@ -1,81 +0,0 @@
|
|
1 |
-
import sys
|
2 |
-
import asyncio
|
3 |
-
from io import BytesIO
|
4 |
-
|
5 |
-
from fairseq import checkpoint_utils
|
6 |
-
|
7 |
-
import torch
|
8 |
-
|
9 |
-
import edge_tts
|
10 |
-
import librosa
|
11 |
-
|
12 |
-
|
13 |
-
# https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/blob/main/config.py#L43-L55 # noqa
|
14 |
-
def has_mps() -> bool:
|
15 |
-
if sys.platform != "darwin":
|
16 |
-
return False
|
17 |
-
else:
|
18 |
-
if not getattr(torch, 'has_mps', False):
|
19 |
-
return False
|
20 |
-
|
21 |
-
try:
|
22 |
-
torch.zeros(1).to(torch.device("mps"))
|
23 |
-
return True
|
24 |
-
except Exception:
|
25 |
-
return False
|
26 |
-
|
27 |
-
|
28 |
-
def is_half(device: str) -> bool:
|
29 |
-
if not device.startswith('cuda'):
|
30 |
-
return False
|
31 |
-
else:
|
32 |
-
gpu_name = torch.cuda.get_device_name(
|
33 |
-
int(device.split(':')[-1])
|
34 |
-
).upper()
|
35 |
-
|
36 |
-
# ...regex?
|
37 |
-
if (
|
38 |
-
('16' in gpu_name and 'V100' not in gpu_name)
|
39 |
-
or 'P40' in gpu_name
|
40 |
-
or '1060' in gpu_name
|
41 |
-
or '1070' in gpu_name
|
42 |
-
or '1080' in gpu_name
|
43 |
-
):
|
44 |
-
return False
|
45 |
-
|
46 |
-
return True
|
47 |
-
|
48 |
-
|
49 |
-
def load_hubert_model(device: str, model_path: str = 'hubert_base.pt'):
|
50 |
-
model = checkpoint_utils.load_model_ensemble_and_task(
|
51 |
-
[model_path]
|
52 |
-
)[0][0].to(device)
|
53 |
-
|
54 |
-
if is_half(device):
|
55 |
-
return model.half()
|
56 |
-
else:
|
57 |
-
return model.float()
|
58 |
-
|
59 |
-
|
60 |
-
async def call_edge_tts(speaker_name: str, text: str):
|
61 |
-
tts_com = edge_tts.Communicate(text, speaker_name)
|
62 |
-
tts_raw = b''
|
63 |
-
|
64 |
-
# Stream TTS audio to bytes
|
65 |
-
async for chunk in tts_com.stream():
|
66 |
-
if chunk['type'] == 'audio':
|
67 |
-
tts_raw += chunk['data']
|
68 |
-
|
69 |
-
# Convert mp3 stream to wav
|
70 |
-
ffmpeg_proc = await asyncio.create_subprocess_exec(
|
71 |
-
'ffmpeg',
|
72 |
-
'-f', 'mp3',
|
73 |
-
'-i', '-',
|
74 |
-
'-f', 'wav',
|
75 |
-
'-',
|
76 |
-
stdin=asyncio.subprocess.PIPE,
|
77 |
-
stdout=asyncio.subprocess.PIPE
|
78 |
-
)
|
79 |
-
(tts_wav, _) = await ffmpeg_proc.communicate(tts_raw)
|
80 |
-
|
81 |
-
return librosa.load(BytesIO(tts_wav))
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/resolvelib/requirements.py
DELETED
@@ -1,165 +0,0 @@
-from pip._vendor.packaging.specifiers import SpecifierSet
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-
-from pip._internal.req.req_install import InstallRequirement
-
-from .base import Candidate, CandidateLookup, Requirement, format_name
-
-
-class ExplicitRequirement(Requirement):
-    def __init__(self, candidate: Candidate) -> None:
-        self.candidate = candidate
-
-    def __str__(self) -> str:
-        return str(self.candidate)
-
-    def __repr__(self) -> str:
-        return "{class_name}({candidate!r})".format(
-            class_name=self.__class__.__name__,
-            candidate=self.candidate,
-        )
-
-    @property
-    def project_name(self) -> NormalizedName:
-        # No need to canonicalize - the candidate did this
-        return self.candidate.project_name
-
-    @property
-    def name(self) -> str:
-        # No need to canonicalize - the candidate did this
-        return self.candidate.name
-
-    def format_for_error(self) -> str:
-        return self.candidate.format_for_error()
-
-    def get_candidate_lookup(self) -> CandidateLookup:
-        return self.candidate, None
-
-    def is_satisfied_by(self, candidate: Candidate) -> bool:
-        return candidate == self.candidate
-
-
-class SpecifierRequirement(Requirement):
-    def __init__(self, ireq: InstallRequirement) -> None:
-        assert ireq.link is None, "This is a link, not a specifier"
-        self._ireq = ireq
-        self._extras = frozenset(ireq.extras)
-
-    def __str__(self) -> str:
-        return str(self._ireq.req)
-
-    def __repr__(self) -> str:
-        return "{class_name}({requirement!r})".format(
-            class_name=self.__class__.__name__,
-            requirement=str(self._ireq.req),
-        )
-
-    @property
-    def project_name(self) -> NormalizedName:
-        assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
-        return canonicalize_name(self._ireq.req.name)
-
-    @property
-    def name(self) -> str:
-        return format_name(self.project_name, self._extras)
-
-    def format_for_error(self) -> str:
-        # Convert comma-separated specifiers into "A, B, ..., F and G"
-        # This makes the specifier a bit more "human readable", without
-        # risking a change in meaning. (Hopefully! Not all edge cases have
-        # been checked)
-        parts = [s.strip() for s in str(self).split(",")]
-        if len(parts) == 0:
-            return ""
-        elif len(parts) == 1:
-            return parts[0]
-
-        return ", ".join(parts[:-1]) + " and " + parts[-1]
-
-    def get_candidate_lookup(self) -> CandidateLookup:
-        return None, self._ireq
-
-    def is_satisfied_by(self, candidate: Candidate) -> bool:
-        assert candidate.name == self.name, (
-            f"Internal issue: Candidate is not for this requirement "
-            f"{candidate.name} vs {self.name}"
-        )
-        # We can safely always allow prereleases here since PackageFinder
-        # already implements the prerelease logic, and would have filtered out
-        # prerelease candidates if the user does not expect them.
-        assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
-        spec = self._ireq.req.specifier
-        return spec.contains(candidate.version, prereleases=True)
-
-
-class RequiresPythonRequirement(Requirement):
-    """A requirement representing Requires-Python metadata."""
-
-    def __init__(self, specifier: SpecifierSet, match: Candidate) -> None:
-        self.specifier = specifier
-        self._candidate = match
-
-    def __str__(self) -> str:
-        return f"Python {self.specifier}"
-
-    def __repr__(self) -> str:
-        return "{class_name}({specifier!r})".format(
-            class_name=self.__class__.__name__,
-            specifier=str(self.specifier),
-        )
-
-    @property
-    def project_name(self) -> NormalizedName:
-        return self._candidate.project_name
-
-    @property
-    def name(self) -> str:
-        return self._candidate.name
-
-    def format_for_error(self) -> str:
-        return str(self)
-
-    def get_candidate_lookup(self) -> CandidateLookup:
-        if self.specifier.contains(self._candidate.version, prereleases=True):
-            return self._candidate, None
-        return None, None
-
-    def is_satisfied_by(self, candidate: Candidate) -> bool:
-        assert candidate.name == self._candidate.name, "Not Python candidate"
-        # We can safely always allow prereleases here since PackageFinder
-        # already implements the prerelease logic, and would have filtered out
-        # prerelease candidates if the user does not expect them.
-        return self.specifier.contains(candidate.version, prereleases=True)
-
-
-class UnsatisfiableRequirement(Requirement):
-    """A requirement that cannot be satisfied."""
-
-    def __init__(self, name: NormalizedName) -> None:
-        self._name = name
-
-    def __str__(self) -> str:
-        return f"{self._name} (unavailable)"
-
-    def __repr__(self) -> str:
-        return "{class_name}({name!r})".format(
-            class_name=self.__class__.__name__,
-            name=str(self._name),
-        )
-
-    @property
-    def project_name(self) -> NormalizedName:
-        return self._name
-
-    @property
-    def name(self) -> str:
-        return self._name
-
-    def format_for_error(self) -> str:
-        return str(self)
-
-    def get_candidate_lookup(self) -> CandidateLookup:
-        return None, None
-
-    def is_satisfied_by(self, candidate: Candidate) -> bool:
-        return False
spaces/BisratWorku/Bear_classifier/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Bear Classifier
-emoji: 📊
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/BlueRey/MendoBERT_QA/app.py
DELETED
@@ -1,40 +0,0 @@
-import streamlit as st
-from transformers import pipeline
-
-model = pipeline("question-answering", model="/home/user/app/MendoBERT/", tokenizer="indolem/indobert-base-uncased")
-basemodel = pipeline("question-answering", model="/home/user/app/IndoLEM/", tokenizer="indolem/indobert-base-uncased")
-
-st.title(':blue[MendoBERT] - Question Answering 🤔 💭')
-
-if 'context' not in st.session_state:
-    st.session_state['options'] = ""
-
-if 'question' not in st.session_state:
-    st.session_state['options'] = ""
-
-def button1_callback():
-    st.session_state['context'] = "Acrokeratosis paraneoplastica (Sindrom Bazex) dengan karsinoma sel skuamosa orofaringeal. Seorang pria kulit putih berusia 65 tahun menunjukkan semua gambaran klinis akrokeratosis paraneoplastica dari Bazex, ditandai dengan eritema keunguan dan penskalaan hidung, heliks aural, jari tangan, dan kaki, dengan keratoderma dan distrofi kuku yang parah. Pemeriksaan pasien untuk kemungkinan keganasan terkait mengungkapkan karsinoma sel skuamosa asimtomatik di daerah orofaringeal. Lesi kulit sembuh hampir seluruhnya setelah terapi radiasi neoplasma, tetapi onikodistrofi tetap ada. Laporan kasus ini menggambarkan pentingnya pengenalan dini sindrom Bazex."
-    st.session_state['question'] = "Nama sinonim dari Acrokeratosis paraneoplastica."
-
-def button2_callback():
-    st.session_state['context'] = "Hingga saat ini, jumlah faktor genetik molekuler yang secara tegas terkait dengan tumor hipofisis dapat dihitung dengan jari: (1) aktivasi GNAS1 pada akromegali; (2) mutasi MENIN dan p27Kip1 (CDKN1B) yang terkait dengan neoplasia endokrin multipel tipe 1; (3) mutasi PRKA1RA dengan hilangnya 17q22-24 di kompleks Carney, dan (4) mutasi gen reseptor hidrokarbon aril yang berinteraksi protein pada 15% adenoma hipofisis terisolasi familial dan 50% akromegali terisolasi familial"
-    st.session_state['question'] = "Mutasi gen mana yang terlibat dalam adenoma hipofisis terisolasi familial?"
-
-context_placeholder = st.empty()
-with context_placeholder:
-    context = st.text_area('Enter context: ', key = 'context')
-
-question_placeholder = st.empty()
-with question_placeholder:
-    question = st.text_area('Enter question: ', key = 'question')
-
-st.caption('_Examples_')
-st.button('Context: \n\n Acrokeratosis paraneoplastica (Sindrom Bazex) dengan karsinoma sel skuamosa orofaringeal. Seorang pria kulit putih berusia 65 tahun menunjukkan semua gambaran klinis akrokeratosis paraneoplastica dari Bazex, ditandai dengan eritema keunguan dan penskalaan hidung, heliks aural, jari tangan, dan kaki, dengan keratoderma dan distrofi kuku yang parah. Pemeriksaan pasien untuk kemungkinan keganasan terkait mengungkapkan karsinoma sel skuamosa asimtomatik di daerah orofaringeal. Lesi kulit sembuh hampir seluruhnya setelah terapi radiasi neoplasma, tetapi onikodistrofi tetap ada. Laporan kasus ini menggambarkan pentingnya pengenalan dini sindrom Bazex. \n\n\n Question: \n\n Nama sinonim dari Acrokeratosis paraneoplastica. \n\n\n Expected Answer: \n\n Sindrom Bazex', use_container_width=True, on_click = button1_callback)
-st.button('Context: \n\n Hingga saat ini, jumlah faktor genetik molekuler yang secara tegas terkait dengan tumor hipofisis dapat dihitung dengan jari: (1) aktivasi GNAS1 pada akromegali; (2) mutasi MENIN dan p27Kip1 (CDKN1B) yang terkait dengan neoplasia endokrin multipel tipe 1; (3) mutasi PRKA1RA dengan hilangnya 17q22-24 di kompleks Carney, dan (4) mutasi gen reseptor hidrokarbon aril yang berinteraksi protein pada 15% adenoma hipofisis terisolasi familial dan 50% akromegali terisolasi familial \n\n\n Question: \n\n Mutasi gen mana yang terlibat dalam adenoma hipofisis terisolasi familial? \n\n\n Expected Answer: \n\n reseptor hidrokarbon aril yang berinteraksi protein', use_container_width=True, on_click = button2_callback)
-
-if context and question:
-    st.subheader('MendoBERT')
-    st.write(model(context=context, question=question))
-    st.write("\n")
-    st.subheader('IndoLEM')
-    st.write(basemodel(context=context, question=question))
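
For context, a transformers question-answering pipeline returns a dict with `score`, `start`, `end`, and `answer` keys, which is what the `st.write` calls above render. A standalone sketch using a public checkpoint (the model name here is an illustrative example, not the Space's own checkpoint):

from transformers import pipeline

qa = pipeline('question-answering', model='distilbert-base-cased-distilled-squad')
print(qa(context='The tower is in Paris.', question='Where is the tower?'))
# e.g. {'score': 0.98, 'start': 16, 'end': 21, 'answer': 'Paris'}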
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/events.py
DELETED
@@ -1,385 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import datetime
-import json
-import logging
-import os
-import time
-from collections import defaultdict
-from contextlib import contextmanager
-import torch
-from fvcore.common.file_io import PathManager
-from fvcore.common.history_buffer import HistoryBuffer
-
-_CURRENT_STORAGE_STACK = []
-
-
-def get_event_storage():
-    """
-    Returns:
-        The :class:`EventStorage` object that's currently being used.
-        Throws an error if no :class`EventStorage` is currently enabled.
-    """
-    assert len(
-        _CURRENT_STORAGE_STACK
-    ), "get_event_storage() has to be called inside a 'with EventStorage(...)' context!"
-    return _CURRENT_STORAGE_STACK[-1]
-
-
-class EventWriter:
-    """
-    Base class for writers that obtain events from :class:`EventStorage` and process them.
-    """
-
-    def write(self):
-        raise NotImplementedError
-
-    def close(self):
-        pass
-
-
-class JSONWriter(EventWriter):
-    """
-    Write scalars to a json file.
-
-    It saves scalars as one json per line (instead of a big json) for easy parsing.
-
-    Examples parsing such a json file:
-
-    .. code-block:: none
-
-        $ cat metrics.json | jq -s '.[0:2]'
-        [
-          {
-            "data_time": 0.008433341979980469,
-            "iteration": 20,
-            "loss": 1.9228371381759644,
-            "loss_box_reg": 0.050025828182697296,
-            "loss_classifier": 0.5316952466964722,
-            "loss_mask": 0.7236229181289673,
-            "loss_rpn_box": 0.0856662318110466,
-            "loss_rpn_cls": 0.48198649287223816,
-            "lr": 0.007173333333333333,
-            "time": 0.25401854515075684
-          },
-          {
-            "data_time": 0.007216215133666992,
-            "iteration": 40,
-            "loss": 1.282649278640747,
-            "loss_box_reg": 0.06222952902317047,
-            "loss_classifier": 0.30682939291000366,
-            "loss_mask": 0.6970193982124329,
-            "loss_rpn_box": 0.038663312792778015,
-            "loss_rpn_cls": 0.1471673548221588,
-            "lr": 0.007706666666666667,
-            "time": 0.2490077018737793
-          }
-        ]
-
-        $ cat metrics.json | jq '.loss_mask'
-        0.7126231789588928
-        0.689423680305481
-        0.6776131987571716
-        ...
-
-    """
-
-    def __init__(self, json_file, window_size=20):
-        """
-        Args:
-            json_file (str): path to the json file. New data will be appended if the file exists.
-            window_size (int): the window size of median smoothing for the scalars whose
-                `smoothing_hint` are True.
-        """
-        self._file_handle = PathManager.open(json_file, "a")
-        self._window_size = window_size
-
-    def write(self):
-        storage = get_event_storage()
-        to_save = {"iteration": storage.iter}
-        to_save.update(storage.latest_with_smoothing_hint(self._window_size))
-        self._file_handle.write(json.dumps(to_save, sort_keys=True) + "\n")
-        self._file_handle.flush()
-        try:
-            os.fsync(self._file_handle.fileno())
-        except AttributeError:
-            pass
-
-    def close(self):
-        self._file_handle.close()
-
-
-class TensorboardXWriter(EventWriter):
-    """
-    Write all scalars to a tensorboard file.
-    """
-
-    def __init__(self, log_dir: str, window_size: int = 20, **kwargs):
-        """
-        Args:
-            log_dir (str): the directory to save the output events
-            window_size (int): the scalars will be median-smoothed by this window size
-
-            kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`
-        """
-        self._window_size = window_size
-        from torch.utils.tensorboard import SummaryWriter
-
-        self._writer = SummaryWriter(log_dir, **kwargs)
-
-    def write(self):
-        storage = get_event_storage()
-        for k, v in storage.latest_with_smoothing_hint(self._window_size).items():
-            self._writer.add_scalar(k, v, storage.iter)
-
-        if len(storage.vis_data) >= 1:
-            for img_name, img, step_num in storage.vis_data:
-                self._writer.add_image(img_name, img, step_num)
-            storage.clear_images()
-
-    def close(self):
-        if hasattr(self, "_writer"):  # doesn't exist when the code fails at import
-            self._writer.close()
-
-
-class CommonMetricPrinter(EventWriter):
-    """
-    Print **common** metrics to the terminal, including
-    iteration time, ETA, memory, all losses, and the learning rate.
-
-    To print something different, please implement a similar printer by yourself.
-    """
-
-    def __init__(self, max_iter):
-        """
-        Args:
-            max_iter (int): the maximum number of iterations to train.
-                Used to compute ETA.
-        """
-        self.logger = logging.getLogger(__name__)
-        self._max_iter = max_iter
-        self._last_write = None
-
-    def write(self):
-        storage = get_event_storage()
-        iteration = storage.iter
-
-        try:
-            data_time = storage.history("data_time").avg(20)
-        except KeyError:
-            # they may not exist in the first few iterations (due to warmup)
-            # or when SimpleTrainer is not used
-            data_time = None
-
-        eta_string = "N/A"
-        try:
-            iter_time = storage.history("time").global_avg()
-            eta_seconds = storage.history("time").median(1000) * (self._max_iter - iteration)
-            storage.put_scalar("eta_seconds", eta_seconds, smoothing_hint=False)
-            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
-        except KeyError:
-            iter_time = None
-            # estimate eta on our own - more noisy
-            if self._last_write is not None:
-                estimate_iter_time = (time.perf_counter() - self._last_write[1]) / (
-                    iteration - self._last_write[0]
-                )
-                eta_seconds = estimate_iter_time * (self._max_iter - iteration)
-                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
-            self._last_write = (iteration, time.perf_counter())
-
-        try:
-            lr = "{:.6f}".format(storage.history("lr").latest())
-        except KeyError:
-            lr = "N/A"
-
-        if torch.cuda.is_available():
-            max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
-        else:
-            max_mem_mb = None
-
-        # NOTE: max_mem is parsed by grep in "dev/parse_results.sh"
-        self.logger.info(
-            " eta: {eta}  iter: {iter}  {losses}  {time}{data_time}lr: {lr}  {memory}".format(
-                eta=eta_string,
-                iter=iteration,
-                losses="  ".join(
-                    [
-                        "{}: {:.3f}".format(k, v.median(20))
-                        for k, v in storage.histories().items()
-                        if "loss" in k
-                    ]
-                ),
-                time="time: {:.4f} ".format(iter_time) if iter_time is not None else "",
-                data_time="data_time: {:.4f} ".format(data_time) if data_time is not None else "",
-                lr=lr,
-                memory="max_mem: {:.0f}M".format(max_mem_mb) if max_mem_mb is not None else "",
-            )
-        )
-
-
-class EventStorage:
-    """
-    The user-facing class that provides metric storage functionalities.
-
-    In the future we may add support for storing / logging other types of data if needed.
-    """
-
-    def __init__(self, start_iter=0):
-        """
-        Args:
-            start_iter (int): the iteration number to start with
-        """
-        self._history = defaultdict(HistoryBuffer)
-        self._smoothing_hints = {}
-        self._latest_scalars = {}
-        self._iter = start_iter
-        self._current_prefix = ""
-        self._vis_data = []
-
-    def put_image(self, img_name, img_tensor):
-        """
-        Add an `img_tensor` to the `_vis_data` associated with `img_name`.
-
-        Args:
-            img_name (str): The name of the image to put into tensorboard.
-            img_tensor (torch.Tensor or numpy.array): An `uint8` or `float`
-                Tensor of shape `[channel, height, width]` where `channel` is
-                3. The image format should be RGB. The elements in img_tensor
-                can either have values in [0, 1] (float32) or [0, 255] (uint8).
-                The `img_tensor` will be visualized in tensorboard.
-        """
-        self._vis_data.append((img_name, img_tensor, self._iter))
-
-    def clear_images(self):
-        """
-        Delete all the stored images for visualization. This should be called
-        after images are written to tensorboard.
-        """
-        self._vis_data = []
-
-    def put_scalar(self, name, value, smoothing_hint=True):
-        """
-        Add a scalar `value` to the `HistoryBuffer` associated with `name`.
-
-        Args:
-            smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be
-                smoothed when logged. The hint will be accessible through
-                :meth:`EventStorage.smoothing_hints`.  A writer may ignore the hint
-                and apply custom smoothing rule.
-
-                It defaults to True because most scalars we save need to be smoothed to
-                provide any useful signal.
-        """
-        name = self._current_prefix + name
-        history = self._history[name]
-        value = float(value)
-        history.update(value, self._iter)
-        self._latest_scalars[name] = value
-
-        existing_hint = self._smoothing_hints.get(name)
-        if existing_hint is not None:
-            assert (
-                existing_hint == smoothing_hint
-            ), "Scalar {} was put with a different smoothing_hint!".format(name)
-        else:
-            self._smoothing_hints[name] = smoothing_hint
-
-    def put_scalars(self, *, smoothing_hint=True, **kwargs):
-        """
-        Put multiple scalars from keyword arguments.
-
-        Examples:
-
-            storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True)
-        """
-        for k, v in kwargs.items():
-            self.put_scalar(k, v, smoothing_hint=smoothing_hint)
-
-    def history(self, name):
-        """
-        Returns:
-            HistoryBuffer: the scalar history for name
-        """
-        ret = self._history.get(name, None)
-        if ret is None:
-            raise KeyError("No history metric available for {}!".format(name))
-        return ret
-
-    def histories(self):
-        """
-        Returns:
-            dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars
-        """
-        return self._history
-
-    def latest(self):
-        """
-        Returns:
-            dict[name -> number]: the scalars that's added in the current iteration.
-        """
-        return self._latest_scalars
-
-    def latest_with_smoothing_hint(self, window_size=20):
-        """
-        Similar to :meth:`latest`, but the returned values
-        are either the un-smoothed original latest value,
-        or a median of the given window_size,
-        depend on whether the smoothing_hint is True.
-
-        This provides a default behavior that other writers can use.
-        """
-        result = {}
-        for k, v in self._latest_scalars.items():
-            result[k] = self._history[k].median(window_size) if self._smoothing_hints[k] else v
-        return result
-
-    def smoothing_hints(self):
-        """
-        Returns:
-            dict[name -> bool]: the user-provided hint on whether the scalar
-                is noisy and needs smoothing.
-        """
-        return self._smoothing_hints
-
-    def step(self):
-        """
-        User should call this function at the beginning of each iteration, to
-        notify the storage of the start of a new iteration.
-        The storage will then be able to associate the new data with the
-        correct iteration number.
-        """
-        self._iter += 1
-        self._latest_scalars = {}
-
-    @property
-    def vis_data(self):
-        return self._vis_data
-
-    @property
-    def iter(self):
-        return self._iter
-
-    @property
-    def iteration(self):
-        # for backward compatibility
-        return self._iter
-
-    def __enter__(self):
-        _CURRENT_STORAGE_STACK.append(self)
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        assert _CURRENT_STORAGE_STACK[-1] == self
-        _CURRENT_STORAGE_STACK.pop()
-
-    @contextmanager
-    def name_scope(self, name):
-        """
-        Yields:
-            A context within which all the events added to this storage
-            will be prefixed by the name scope.
-        """
-        old_prefix = self._current_prefix
-        self._current_prefix = name.rstrip("/") + "/"
-        yield
-        self._current_prefix = old_prefix
spaces/CVPR/LIVE/thrust/thrust/system/cpp/memory_resource.h
DELETED
@@ -1,62 +0,0 @@
-/*
- *  Copyright 2018 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-/*! \file cpp/memory_resource.h
- *  \brief Memory resources for the CPP system.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/mr/new.h>
-#include <thrust/mr/fancy_pointer_resource.h>
-
-#include <thrust/system/cpp/pointer.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace cpp
-{
-
-//! \cond
-namespace detail
-{
-    typedef thrust::mr::fancy_pointer_resource<
-        thrust::mr::new_delete_resource,
-        thrust::cpp::pointer<void>
-    > native_resource;
-}
-//! \endcond
-
-/*! \addtogroup memory_resources Memory Resources
- *  \ingroup memory_management_classes
- */
-
-/*! The memory resource for the CPP system. Uses \p mr::new_delete_resource and tags it with \p cpp::pointer. */
-typedef detail::native_resource memory_resource;
-/*! An alias for \p cpp::memory_resource. */
-typedef detail::native_resource universal_memory_resource;
-/*! An alias for \p cpp::memory_resource. */
-typedef detail::native_resource universal_host_pinned_memory_resource;
-
-/*! \}
- */
-
-}
-}
-}
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/async/sort.h
DELETED
@@ -1,34 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-// The purpose of this header is to #include the async/sort.h header of the
-// sequential, host, and device systems. It should be #included in any code
-// which uses ADL to dispatch async sort.
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-//#include <thrust/system/detail/sequential/async/sort.h>
-
-//#define __THRUST_HOST_SYSTEM_ASYNC_SORT_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/async/sort.h>
-//#include __THRUST_HOST_SYSTEM_ASYNC_SORT_HEADER
-//#undef __THRUST_HOST_SYSTEM_ASYNC_SORT_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_ASYNC_SORT_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/async/sort.h>
-#include __THRUST_DEVICE_SYSTEM_ASYNC_SORT_HEADER
-#undef __THRUST_DEVICE_SYSTEM_ASYNC_SORT_HEADER
-
spaces/CVPR/LIVE/thrust/thrust/type_traits/remove_cvref.h
DELETED
@@ -1,48 +0,0 @@
-/*
- *  Copyright 2018 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/type_traits.h>
-
-namespace thrust
-{
-
-#if THRUST_CPP_DIALECT >= 2020
-
-using std::remove_cvref;
-using std::remove_cvref_t;
-
-#else // Older than C++20.
-
-template <typename T>
-struct remove_cvref
-{
-  typedef typename detail::remove_cv<
-    typename detail::remove_reference<T>::type
-  >::type type;
-};
-
-#if THRUST_CPP_DIALECT >= 2011
-template <typename T>
-using remove_cvref_t = typename remove_cvref<T>::type;
-#endif
-
-#endif // THRUST_CPP_DIALECT >= 2020
-
-} // end namespace thrust
-
spaces/CikeyQI/Yunzai/Yunzai/lib/plugins/handler.js
DELETED
@@ -1,73 +0,0 @@
-import util from 'node:util'
-import lodash from 'lodash'
-
-let events = {}
-let Handler = {
-  add (cfg) {
-    let { ns, fn, self, property = 50 } = cfg
-    let key = cfg.key || cfg.event
-    if (!key || !fn) {
-      return
-    }
-    Handler.del(ns, key)
-    logger.mark(`[Handler][Reg]: [${ns}][${key}]`)
-    events[key] = events[key] || []
-    events[key].push({
-      property,
-      fn,
-      ns,
-      self,
-      key
-    })
-    events[key] = lodash.orderBy(events[key], ['priority'], ['asc'])
-  },
-  del (ns, key = '') {
-    if (!key) {
-      for (let key in events) {
-        Handler.del(ns, key)
-      }
-      return
-    }
-    if (!events[key]) {
-      return
-    }
-    for (let idx = 0; idx < events[key].length; idx++) {
-      let handler = events[key][idx]
-      if (handler.ns === ns) {
-        events[key].splice(idx, 1)
-        events[key] = lodash.orderBy(events[key], ['priority'], ['asc'])
-      }
-    }
-  },
-  async callAll (key, e, args) {
-    // Temporarily disable this call
-    // return Handler.call(key, e, args, true)
-  },
-  async call (key, e, args, allHandler = false) {
-    let ret
-    for (let obj of events[key]) {
-      let fn = obj.fn
-      let done = true
-      let reject = (msg = '') => {
-        if (msg) {
-          logger.mark(`[Handler][Reject]: [${obj.ns}][${key}] ${msg}`)
-        }
-        done = false
-      }
-      ret = fn.call(obj.self, e, args, reject)
-      if (util.types.isPromise(ret)) {
-        ret = await ret
-      }
-      if (done && !allHandler) {
-        logger.mark(`[Handler][Done]: [${obj.ns}][${key}]`)
-        return ret
-      }
-    }
-    return ret
-  },
-  has (key) {
-    return !!events[key]
-  }
-}
-export default Handler
-
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/_version_info.py
DELETED
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-
-from functools import total_ordering
-
-from ._funcs import astuple
-from ._make import attrib, attrs
-
-
-@total_ordering
-@attrs(eq=False, order=False, slots=True, frozen=True)
-class VersionInfo:
-    """
-    A version object that can be compared to tuple of length 1--4:
-
-    >>> attr.VersionInfo(19, 1, 0, "final")  <= (19, 2)
-    True
-    >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
-    True
-    >>> vi = attr.VersionInfo(19, 2, 0, "final")
-    >>> vi < (19, 1, 1)
-    False
-    >>> vi < (19,)
-    False
-    >>> vi == (19, 2,)
-    True
-    >>> vi == (19, 2, 1)
-    False
-
-    .. versionadded:: 19.2
-    """
-
-    year = attrib(type=int)
-    minor = attrib(type=int)
-    micro = attrib(type=int)
-    releaselevel = attrib(type=str)
-
-    @classmethod
-    def _from_version_string(cls, s):
-        """
-        Parse *s* and return a _VersionInfo.
-        """
-        v = s.split(".")
-        if len(v) == 3:
-            v.append("final")
-
-        return cls(
-            year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
-        )
-
-    def _ensure_tuple(self, other):
-        """
-        Ensure *other* is a tuple of a valid length.
-
-        Returns a possibly transformed *other* and ourselves as a tuple of
-        the same length as *other*.
-        """
-
-        if self.__class__ is other.__class__:
-            other = astuple(other)
-
-        if not isinstance(other, tuple):
-            raise NotImplementedError
-
-        if not (1 <= len(other) <= 4):
-            raise NotImplementedError
-
-        return astuple(self)[: len(other)], other
-
-    def __eq__(self, other):
-        try:
-            us, them = self._ensure_tuple(other)
-        except NotImplementedError:
-            return NotImplemented
-
-        return us == them
-
-    def __lt__(self, other):
-        try:
-            us, them = self._ensure_tuple(other)
-        except NotImplementedError:
-            return NotImplemented
-
-        # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
-        # have to do anything special with releaselevel for now.
-        return us < them
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/dotenv/__init__.py
DELETED
@@ -1,49 +0,0 @@
-from typing import Any, Optional
-
-from .main import (dotenv_values, find_dotenv, get_key, load_dotenv, set_key,
-                   unset_key)
-
-
-def load_ipython_extension(ipython: Any) -> None:
-    from .ipython import load_ipython_extension
-    load_ipython_extension(ipython)
-
-
-def get_cli_string(
-    path: Optional[str] = None,
-    action: Optional[str] = None,
-    key: Optional[str] = None,
-    value: Optional[str] = None,
-    quote: Optional[str] = None,
-):
-    """Returns a string suitable for running as a shell script.
-
-    Useful for converting arguments passed to a fabric task
-    to be passed to a `local` or `run` command.
-    """
-    command = ['dotenv']
-    if quote:
-        command.append(f'-q {quote}')
-    if path:
-        command.append(f'-f {path}')
-    if action:
-        command.append(action)
-    if key:
-        command.append(key)
-    if value:
-        if ' ' in value:
-            command.append(f'"{value}"')
-        else:
-            command.append(value)
-
-    return ' '.join(command).strip()
-
-
-__all__ = ['get_cli_string',
-           'load_dotenv',
-           'dotenv_values',
-           'get_key',
-           'set_key',
-           'unset_key',
-           'find_dotenv',
-           'load_ipython_extension']
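
A quick sketch of what `get_cli_string` above produces; note the quoting branch for values containing a space:

from dotenv import get_cli_string

print(get_cli_string(path='.env', action='set', key='GREETING', value='hello world'))
# -> dotenv -f .env set GREETING "hello world"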
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-75764f1c.js
DELETED
@@ -1,2 +0,0 @@
-import{ae as s}from"./index-3370be2a.js";const o=["static"];export{s as Component,o as modes};
-//# sourceMappingURL=index-75764f1c.js.map
spaces/DamarJati/DamarJati-NSFW-filter-DecentScan/app.py
DELETED
@@ -1,11 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-import os
-
-pipe = pipeline(task="image-classification",
-                model="DamarJati/NSFW-Filterization-DecentScan"
-                )
-gr.Interface.from_pipeline(pipe,
-                           title="Image Classification",
-                           description="NSFW-filter-DecentScan",
-                           ).launch()
spaces/Datasculptor/StyleGAN-NADA/e4e/models/__init__.py
DELETED
File without changes
spaces/Datasculptor/car-data/app.py
DELETED
@@ -1,73 +0,0 @@
-import io
-
-import gradio as gr
-import requests
-import torch
-import torch.nn.functional as F
-import torchvision.transforms as transforms
-from PIL import Image
-
-from constants import MAKES_MODELS, PRICE_BIN_LABELS, YEARS
-
-print("downloading checkpoint...")
-data = requests.get(
-    "https://data.aqnichol.com/car-data/models/mobilenetv2_432000_calib_torchscript.pt",
-    stream=True,
-).content
-
-print("creating model...")
-model = torch.jit.load(io.BytesIO(data))
-model.eval()
-transform = transforms.Compose(
-    [
-        transforms.ToTensor(),
-        transforms.Normalize(
-            (0.48145466, 0.4578275, 0.40821073),
-            (0.26862954, 0.26130258, 0.27577711),
-        ),
-    ]
-)
-
-print("done.")
-
-
-def classify(img: Image.Image):
-    in_tensor = transform(img)[None]
-    outputs = model(in_tensor)
-
-    price_bins = dict(
-        zip(PRICE_BIN_LABELS, F.softmax(outputs["price_bin"], dim=-1)[0].tolist())
-    )
-    years = dict(
-        zip(
-            [str(year) for year in YEARS] + ["Unknown"],
-            F.softmax(outputs["year"], dim=-1)[0].tolist(),
-        )
-    )
-    make_models = dict(
-        zip(
-            ([f"{make} {model}" for make, model in MAKES_MODELS] + ["Unknown"]),
-            F.softmax(outputs["make_model"], dim=-1)[0].tolist(),
-        )
-    )
-    return (
-        f"${int(round(outputs['price_median'].item()))}",
-        price_bins,
-        years,
-        make_models,
-        img,
-    )
-
-
-iface = gr.Interface(
-    fn=classify,
-    inputs=gr.Image(shape=(224, 224), type="pil"),
-    outputs=[
-        gr.Text(label="Price Prediction"),
-        gr.Label(label="Price Bin", num_top_classes=5),
-        gr.Label(label="Year", num_top_classes=5),
-        gr.Label(label="Make/Model", num_top_classes=10),
-        gr.Image(label="Cropped Input"),
-    ],
-)
-iface.queue(concurrency_count=2).launch()
spaces/Duskfallcrew/darkstorm2150-Protogen_x5.8_Official_Release/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/darkstorm2150/Protogen_x5.8_Official_Release").launch()
spaces/ECCV2022/bytetrack/tutorials/centertrack/opts.py
DELETED
@@ -1,406 +0,0 @@
|
|
1 |
-
from __future__ import absolute_import
|
2 |
-
from __future__ import division
|
3 |
-
from __future__ import print_function
|
4 |
-
|
5 |
-
import argparse
|
6 |
-
import os
|
7 |
-
import sys
|
8 |
-
|
9 |
-
class opts(object):
|
10 |
-
def __init__(self):
|
11 |
-
self.parser = argparse.ArgumentParser()
|
12 |
-
# basic experiment setting
|
13 |
-
self.parser.add_argument('task', default='',
|
14 |
-
help='ctdet | ddd | multi_pose '
|
15 |
-
'| tracking or combined with ,')
|
16 |
-
self.parser.add_argument('--dataset', default='coco',
|
17 |
-
help='see lib/dataset/dataset_facotry for ' +
|
18 |
-
'available datasets')
|
19 |
-
self.parser.add_argument('--test_dataset', default='',
|
20 |
-
help='coco | kitti | coco_hp | pascal')
|
21 |
-
self.parser.add_argument('--exp_id', default='default')
|
22 |
-
self.parser.add_argument('--test', action='store_true')
|
23 |
-
self.parser.add_argument('--debug', type=int, default=0,
|
24 |
-
help='level of visualization.'
|
25 |
-
'1: only show the final detection results'
|
26 |
-
'2: show the network output features'
|
27 |
-
'3: use matplot to display' # useful when lunching training with ipython notebook
|
28 |
-
'4: save all visualizations to disk')
|
29 |
-
self.parser.add_argument('--no_pause', action='store_true')
|
30 |
-
self.parser.add_argument('--demo', default='',
|
31 |
-
help='path to image/ image folders/ video. '
|
32 |
-
'or "webcam"')
|
33 |
-
self.parser.add_argument('--load_model', default='',
|
34 |
-
help='path to pretrained model')
|
35 |
-
self.parser.add_argument('--resume', action='store_true',
|
36 |
-
help='resume an experiment. '
|
37 |
-
'Reloaded the optimizer parameter and '
|
38 |
-
'set load_model to model_last.pth '
|
39 |
-
'in the exp dir if load_model is empty.')
|
40 |
-
|
41 |
-
# system
|
42 |
-
self.parser.add_argument('--gpus', default='0',
|
43 |
-
help='-1 for CPU, use comma for multiple gpus')
|
44 |
-
self.parser.add_argument('--num_workers', type=int, default=4,
|
45 |
-
help='dataloader threads. 0 for single-thread.')
|
46 |
-
self.parser.add_argument('--not_cuda_benchmark', action='store_true',
|
47 |
-
help='disable when the input size is not fixed.')
|
48 |
-
self.parser.add_argument('--seed', type=int, default=317,
|
49 |
-
help='random seed') # from CornerNet
|
50 |
-
self.parser.add_argument('--not_set_cuda_env', action='store_true',
|
51 |
-
help='used when training in slurm clusters.')
|
52 |
-
|
53 |
-
# log
|
54 |
-
self.parser.add_argument('--print_iter', type=int, default=0,
|
55 |
-
help='disable progress bar and print to screen.')
|
56 |
-
self.parser.add_argument('--save_all', action='store_true',
|
57 |
-
help='save model to disk every 5 epochs.')
|
58 |
-
self.parser.add_argument('--vis_thresh', type=float, default=0.3,
|
59 |
-
help='visualization threshold.')
|
60 |
-
self.parser.add_argument('--debugger_theme', default='white',
|
61 |
-
choices=['white', 'black'])
|
62 |
-
self.parser.add_argument('--eval_val', action='store_true')
|
63 |
-
self.parser.add_argument('--save_imgs', default='', help='')
|
64 |
-
self.parser.add_argument('--save_img_suffix', default='', help='')
|
65 |
-
self.parser.add_argument('--skip_first', type=int, default=-1, help='')
|
66 |
-
self.parser.add_argument('--save_video', action='store_true')
|
67 |
-
self.parser.add_argument('--save_framerate', type=int, default=30)
|
68 |
-
self.parser.add_argument('--resize_video', action='store_true')
|
69 |
-
self.parser.add_argument('--video_h', type=int, default=512, help='')
|
70 |
-
self.parser.add_argument('--video_w', type=int, default=512, help='')
|
71 |
-
self.parser.add_argument('--transpose_video', action='store_true')
|
72 |
-
self.parser.add_argument('--show_track_color', action='store_true')
|
73 |
-
self.parser.add_argument('--not_show_bbox', action='store_true')
|
74 |
-
self.parser.add_argument('--not_show_number', action='store_true')
|
75 |
-
self.parser.add_argument('--not_show_txt', action='store_true')
|
76 |
-
self.parser.add_argument('--qualitative', action='store_true')
|
77 |
-
self.parser.add_argument('--tango_color', action='store_true')
|
78 |
-
self.parser.add_argument('--only_show_dots', action='store_true')
|
79 |
-
self.parser.add_argument('--show_trace', action='store_true')
|
80 |
-
|
81 |
-
# model
|
82 |
-
self.parser.add_argument('--arch', default='dla_34',
|
83 |
-
help='model architecture. Currently tested'
|
84 |
-
'res_18 | res_101 | resdcn_18 | resdcn_101 |'
|
85 |
-
'dlav0_34 | dla_34 | hourglass')
|
86 |
-
self.parser.add_argument('--dla_node', default='dcn')
|
87 |
-
self.parser.add_argument('--head_conv', type=int, default=-1,
|
88 |
-
help='conv layer channels for output head'
|
89 |
-
'0 for no conv layer'
|
90 |
-
'-1 for default setting: '
|
91 |
-
'64 for resnets and 256 for dla.')
|
92 |
-
self.parser.add_argument('--num_head_conv', type=int, default=1)
|
93 |
-
self.parser.add_argument('--head_kernel', type=int, default=3, help='')
|
94 |
-
self.parser.add_argument('--down_ratio', type=int, default=4,
|
95 |
-
help='output stride. Currently only supports 4.')
|
96 |
-
self.parser.add_argument('--not_idaup', action='store_true')
|
97 |
-
self.parser.add_argument('--num_classes', type=int, default=-1)
|
98 |
-
self.parser.add_argument('--num_layers', type=int, default=101)
|
99 |
-
self.parser.add_argument('--backbone', default='dla34')
|
100 |
-
self.parser.add_argument('--neck', default='dlaup')
|
101 |
-
self.parser.add_argument('--msra_outchannel', type=int, default=256)
|
102 |
-
self.parser.add_argument('--efficient_level', type=int, default=0)
|
103 |
-
self.parser.add_argument('--prior_bias', type=float, default=-4.6) # -2.19
|
104 |
-
|
105 |
-
# input
|
106 |
-
self.parser.add_argument('--input_res', type=int, default=-1,
|
107 |
-
help='input height and width. -1 for default from '
|
108 |
-
'dataset. Will be overriden by input_h | input_w')
|
109 |
-
self.parser.add_argument('--input_h', type=int, default=-1,
|
110 |
-
help='input height. -1 for default from dataset.')
|
111 |
-
self.parser.add_argument('--input_w', type=int, default=-1,
|
112 |
-
help='input width. -1 for default from dataset.')
|
113 |
-
self.parser.add_argument('--dataset_version', default='')
|
114 |
-
|
115 |
-
# train
|
116 |
-
self.parser.add_argument('--optim', default='adam')
|
117 |
-
self.parser.add_argument('--lr', type=float, default=1.25e-4,
|
118 |
-
help='learning rate for batch size 32.')
|
119 |
-
self.parser.add_argument('--lr_step', type=str, default='60',
|
120 |
-
help='drop learning rate by 10.')
|
121 |
-
self.parser.add_argument('--save_point', type=str, default='90',
|
122 |
-
help='when to save the model to disk.')
|
123 |
-
self.parser.add_argument('--num_epochs', type=int, default=70,
|
124 |
-
help='total training epochs.')
|
125 |
-
self.parser.add_argument('--batch_size', type=int, default=32,
|
126 |
-
help='batch size')
|
127 |
-
self.parser.add_argument('--master_batch_size', type=int, default=-1,
|
128 |
-
help='batch size on the master gpu.')
|
129 |
-
self.parser.add_argument('--num_iters', type=int, default=-1,
|
130 |
-
help='default: #samples / batch_size.')
|
131 |
-
self.parser.add_argument('--val_intervals', type=int, default=10000,
|
132 |
-
help='number of epochs to run validation.')
|
133 |
-
self.parser.add_argument('--trainval', action='store_true',
|
134 |
-
help='include validation in training and '
|
135 |
-
'test on test set')
|
136 |
-
self.parser.add_argument('--ltrb', action='store_true',
|
137 |
-
help='')
|
138 |
-
self.parser.add_argument('--ltrb_weight', type=float, default=0.1,
|
139 |
-
help='')
|
140 |
-
self.parser.add_argument('--reset_hm', action='store_true')
|
141 |
-
self.parser.add_argument('--reuse_hm', action='store_true')
|
142 |
-
self.parser.add_argument('--use_kpt_center', action='store_true')
|
143 |
-
self.parser.add_argument('--add_05', action='store_true')
|
144 |
-
self.parser.add_argument('--dense_reg', type=int, default=1, help='')
|
145 |
-
|
146 |
-
# test
|
147 |
-
self.parser.add_argument('--flip_test', action='store_true',
|
148 |
-
help='flip data augmentation.')
|
149 |
-
self.parser.add_argument('--test_scales', type=str, default='1',
|
150 |
-
help='multi scale test augmentation.')
|
151 |
-
self.parser.add_argument('--nms', action='store_true',
|
152 |
-
help='run nms in testing.')
|
153 |
-
self.parser.add_argument('--K', type=int, default=100,
|
154 |
-
help='max number of output objects.')
|
155 |
-
self.parser.add_argument('--not_prefetch_test', action='store_true',
|
156 |
-
help='not use parallal data pre-processing.')
|
157 |
-
self.parser.add_argument('--fix_short', type=int, default=-1)
|
158 |
-
self.parser.add_argument('--keep_res', action='store_true',
|
159 |
-
help='keep the original resolution'
|
160 |
-
' during validation.')
|
161 |
-
self.parser.add_argument('--map_argoverse_id', action='store_true',
|
162 |
-
                             help='if trained on nuscenes and eval on kitti')
    self.parser.add_argument('--out_thresh', type=float, default=-1,
                             help='')
    self.parser.add_argument('--depth_scale', type=float, default=1,
                             help='')
    self.parser.add_argument('--save_results', action='store_true')
    self.parser.add_argument('--load_results', default='')
    self.parser.add_argument('--use_loaded_results', action='store_true')
    self.parser.add_argument('--ignore_loaded_cats', default='')
    self.parser.add_argument('--model_output_list', action='store_true',
                             help='used when converting to ONNX')
    self.parser.add_argument('--non_block_test', action='store_true')
    self.parser.add_argument('--vis_gt_bev', default='', help='')
    self.parser.add_argument('--kitti_split', default='3dop',
                             help='different validation split for kitti: '
                                  '3dop | subcnn')
    self.parser.add_argument('--test_focal_length', type=int, default=-1)

    # dataset
    self.parser.add_argument('--not_rand_crop', action='store_true',
                             help='do not use the random crop data '
                                  'augmentation from CornerNet.')
    self.parser.add_argument('--not_max_crop', action='store_true',
                             help='used when the training dataset has '
                                  'imbalanced aspect ratios.')
    self.parser.add_argument('--shift', type=float, default=0,
                             help='when not using random crop, apply shift '
                                  'augmentation with this magnitude (e.g. 0.1).')
    self.parser.add_argument('--scale', type=float, default=0,
                             help='when not using random crop, apply scale '
                                  'augmentation with this magnitude (e.g. 0.4).')
    self.parser.add_argument('--aug_rot', type=float, default=0,
                             help='probability of applying '
                                  'rotation augmentation.')
    self.parser.add_argument('--rotate', type=float, default=0,
                             help='when not using random crop, '
                                  'apply rotation augmentation.')
    self.parser.add_argument('--flip', type=float, default=0.5,
                             help='probability of applying flip augmentation.')
    self.parser.add_argument('--no_color_aug', action='store_true',
                             help='do not use the color augmentation '
                                  'from CornerNet.')

    # Tracking
    self.parser.add_argument('--tracking', action='store_true')
    self.parser.add_argument('--pre_hm', action='store_true')
    self.parser.add_argument('--same_aug_pre', action='store_true')
    self.parser.add_argument('--zero_pre_hm', action='store_true')
    self.parser.add_argument('--hm_disturb', type=float, default=0)
    self.parser.add_argument('--lost_disturb', type=float, default=0)
    self.parser.add_argument('--fp_disturb', type=float, default=0)
    self.parser.add_argument('--pre_thresh', type=float, default=-1)
    self.parser.add_argument('--track_thresh', type=float, default=0.3)
    self.parser.add_argument('--match_thresh', type=float, default=0.8)
    self.parser.add_argument('--track_buffer', type=int, default=30)
    self.parser.add_argument('--new_thresh', type=float, default=0.3)
    self.parser.add_argument('--max_frame_dist', type=int, default=3)
    self.parser.add_argument('--ltrb_amodal', action='store_true')
    self.parser.add_argument('--ltrb_amodal_weight', type=float, default=0.1)
    self.parser.add_argument('--public_det', action='store_true')
    self.parser.add_argument('--no_pre_img', action='store_true')
    self.parser.add_argument('--zero_tracking', action='store_true')
    self.parser.add_argument('--hungarian', action='store_true')
    self.parser.add_argument('--max_age', type=int, default=-1)

    # loss
    self.parser.add_argument('--tracking_weight', type=float, default=1)
    self.parser.add_argument('--reg_loss', default='l1',
                             help='regression loss: sl1 | l1 | l2')
    self.parser.add_argument('--hm_weight', type=float, default=1,
                             help='loss weight for keypoint heatmaps.')
    self.parser.add_argument('--off_weight', type=float, default=1,
                             help='loss weight for keypoint local offsets.')
    self.parser.add_argument('--wh_weight', type=float, default=0.1,
                             help='loss weight for bounding box size.')
    self.parser.add_argument('--hp_weight', type=float, default=1,
                             help='loss weight for human pose offset.')
    self.parser.add_argument('--hm_hp_weight', type=float, default=1,
                             help='loss weight for human keypoint heatmap.')
    self.parser.add_argument('--amodel_offset_weight', type=float, default=1,
                             help='Please forgive the typo.')
    self.parser.add_argument('--dep_weight', type=float, default=1,
                             help='loss weight for depth.')
    self.parser.add_argument('--dim_weight', type=float, default=1,
                             help='loss weight for 3d bounding box size.')
    self.parser.add_argument('--rot_weight', type=float, default=1,
                             help='loss weight for orientation.')
    self.parser.add_argument('--nuscenes_att', action='store_true')
    self.parser.add_argument('--nuscenes_att_weight', type=float, default=1)
    self.parser.add_argument('--velocity', action='store_true')
    self.parser.add_argument('--velocity_weight', type=float, default=1)

    # custom dataset
    self.parser.add_argument('--custom_dataset_img_path', default='')
    self.parser.add_argument('--custom_dataset_ann_path', default='')
    self.parser.add_argument('--bird_view_world_size', type=int, default=64)

  def parse(self, args=''):
    if args == '':
      opt = self.parser.parse_args()
    else:
      opt = self.parser.parse_args(args)

    if opt.test_dataset == '':
      opt.test_dataset = opt.dataset

    opt.gpus_str = opt.gpus
    opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
    opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >= 0 else [-1]
    opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
    opt.save_point = [int(i) for i in opt.save_point.split(',')]
    opt.test_scales = [float(i) for i in opt.test_scales.split(',')]
    opt.save_imgs = [i for i in opt.save_imgs.split(',')] \
      if opt.save_imgs != '' else []
    opt.ignore_loaded_cats = \
      [int(i) for i in opt.ignore_loaded_cats.split(',')] \
      if opt.ignore_loaded_cats != '' else []

    opt.num_workers = max(opt.num_workers, 2 * len(opt.gpus))
    opt.pre_img = False
    if 'tracking' in opt.task:
      print('Running tracking')
      opt.tracking = True
      # opt.out_thresh = max(opt.track_thresh, opt.out_thresh)
      # opt.pre_thresh = max(opt.track_thresh, opt.pre_thresh)
      # opt.new_thresh = max(opt.track_thresh, opt.new_thresh)
      opt.pre_img = not opt.no_pre_img
      print('Using tracking threshold for out threshold!', opt.track_thresh)
      if 'ddd' in opt.task:
        opt.show_track_color = True

    opt.fix_res = not opt.keep_res
    print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')

    if opt.head_conv == -1:  # init default head_conv
      opt.head_conv = 256 if 'dla' in opt.arch else 64

    opt.pad = 127 if 'hourglass' in opt.arch else 31
    opt.num_stacks = 2 if opt.arch == 'hourglass' else 1

    if opt.master_batch_size == -1:
      opt.master_batch_size = opt.batch_size // len(opt.gpus)
    rest_batch_size = (opt.batch_size - opt.master_batch_size)
    opt.chunk_sizes = [opt.master_batch_size]
    for i in range(len(opt.gpus) - 1):
      slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
      if i < rest_batch_size % (len(opt.gpus) - 1):
        slave_chunk_size += 1
      opt.chunk_sizes.append(slave_chunk_size)
    print('training chunk_sizes:', opt.chunk_sizes)

    if opt.debug > 0:
      opt.num_workers = 0
      opt.batch_size = 1
      opt.gpus = [opt.gpus[0]]
      opt.master_batch_size = -1

    # log dirs
    opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
    opt.data_dir = os.path.join(opt.root_dir, 'data')
    opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
    opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
    opt.debug_dir = os.path.join(opt.save_dir, 'debug')

    if opt.resume and opt.load_model == '':
      opt.load_model = os.path.join(opt.save_dir, 'model_last.pth')
    return opt

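  # Aside (illustrative, not from the original file): a worked example of the
  # chunk_sizes split above. With batch_size=13 on 4 GPUs and master_batch_size
  # unset (-1), the master GPU gets 13 // 4 = 3 samples and the remaining 10
  # are split over the other 3 GPUs as 4, 3, 3 (the earlier slave GPUs absorb
  # the remainder), so chunk_sizes == [3, 4, 3, 3].
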
  def update_dataset_info_and_set_heads(self, opt, dataset):
    opt.num_classes = dataset.num_categories \
      if opt.num_classes < 0 else opt.num_classes
    # input_h(w): opt.input_h overrides opt.input_res overrides dataset default
    input_h, input_w = dataset.default_resolution
    input_h = opt.input_res if opt.input_res > 0 else input_h
    input_w = opt.input_res if opt.input_res > 0 else input_w
    opt.input_h = opt.input_h if opt.input_h > 0 else input_h
    opt.input_w = opt.input_w if opt.input_w > 0 else input_w
    opt.output_h = opt.input_h // opt.down_ratio
    opt.output_w = opt.input_w // opt.down_ratio
    opt.input_res = max(opt.input_h, opt.input_w)
    opt.output_res = max(opt.output_h, opt.output_w)

    opt.heads = {'hm': opt.num_classes, 'reg': 2, 'wh': 2}

    if 'tracking' in opt.task:
      opt.heads.update({'tracking': 2})

    if 'ddd' in opt.task:
      opt.heads.update({'dep': 1, 'rot': 8, 'dim': 3, 'amodel_offset': 2})

    if 'multi_pose' in opt.task:
      opt.heads.update({
        'hps': dataset.num_joints * 2, 'hm_hp': dataset.num_joints,
        'hp_offset': 2})

    if opt.ltrb:
      opt.heads.update({'ltrb': 4})
    if opt.ltrb_amodal:
      opt.heads.update({'ltrb_amodal': 4})
    if opt.nuscenes_att:
      opt.heads.update({'nuscenes_att': 8})
    if opt.velocity:
      opt.heads.update({'velocity': 3})

    weight_dict = {'hm': opt.hm_weight, 'wh': opt.wh_weight,
                   'reg': opt.off_weight, 'hps': opt.hp_weight,
                   'hm_hp': opt.hm_hp_weight, 'hp_offset': opt.off_weight,
                   'dep': opt.dep_weight, 'rot': opt.rot_weight,
                   'dim': opt.dim_weight,
                   'amodel_offset': opt.amodel_offset_weight,
                   'ltrb': opt.ltrb_weight,
                   'tracking': opt.tracking_weight,
                   'ltrb_amodal': opt.ltrb_amodal_weight,
                   'nuscenes_att': opt.nuscenes_att_weight,
                   'velocity': opt.velocity_weight}
    opt.weights = {head: weight_dict[head] for head in opt.heads}
    for head in opt.weights:
      if opt.weights[head] == 0:
        del opt.heads[head]
    opt.head_conv = {head: [opt.head_conv
                            for i in range(opt.num_head_conv if head != 'reg' else 1)]
                     for head in opt.heads}

    print('input h w:', opt.input_h, opt.input_w)
    print('heads', opt.heads)
    print('weights', opt.weights)
    print('head conv', opt.head_conv)

    return opt

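  # Aside (illustrative, not from the original file): for task='tracking' on a
  # dataset with 80 categories and the default weights, the method above yields
  #   opt.heads   == {'hm': 80, 'reg': 2, 'wh': 2, 'tracking': 2}
  #   opt.weights == {'hm': 1, 'reg': 1, 'wh': 0.1, 'tracking': 1}
  # and opt.head_conv gives each head a list of num_head_conv conv widths,
  # except 'reg', which is always given a single conv layer.
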
  def init(self, args=''):
    # only used in demo
    default_dataset_info = {
      'ctdet': 'coco', 'multi_pose': 'coco_hp', 'ddd': 'nuscenes',
      'tracking,ctdet': 'coco', 'tracking,multi_pose': 'coco_hp',
      'tracking,ddd': 'nuscenes'
    }
    opt = self.parse(args)  # forward the caller's args instead of dropping them
    from dataset.dataset_factory import dataset_factory
    train_dataset = default_dataset_info[opt.task] \
      if opt.task in default_dataset_info else 'coco'
    dataset = dataset_factory[train_dataset]
    opt = self.update_dataset_info_and_set_heads(opt, dataset)
    return opt
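
# Aside (illustrative, not from the original file): demo-style usage, assuming
# this class is named `opts` and the parser's first positional argument is the
# task name.
#
#   opt = opts().init(['tracking', '--load_model', 'model_last.pth'])
#   print(opt.heads)  # e.g. {'hm': 80, 'reg': 2, 'wh': 2, 'tracking': 2}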
spaces/ECCV2022/bytetrack/tutorials/cstrack/tracker.py
DELETED
@@ -1,542 +0,0 @@
from collections import deque
import os
import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torchsummary import summary

from core.mot.general import non_max_suppression_and_inds, non_max_suppression_jde, non_max_suppression, scale_coords
from core.mot.torch_utils import intersect_dicts
from models.mot.cstrack import Model

from mot_online import matching
from mot_online.kalman_filter import KalmanFilter
from mot_online.log import logger
from mot_online.utils import *

from mot_online.basetrack import BaseTrack, TrackState

class STrack(BaseTrack):
    shared_kalman = KalmanFilter()

    def __init__(self, tlwh, score, temp_feat, buffer_size=30):
        # wait activate
        self._tlwh = np.asarray(tlwh, dtype=np.float64)  # np.float is removed in NumPy >= 1.24
        self.kalman_filter = None
        self.mean, self.covariance = None, None
        self.is_activated = False

        self.score = score
        self.tracklet_len = 0

        self.smooth_feat = None
        self.update_features(temp_feat)
        self.features = deque([], maxlen=buffer_size)
        self.alpha = 0.9

    def update_features(self, feat):
        # L2-normalize, then exponentially smooth the appearance embedding
        feat /= np.linalg.norm(feat)
        self.curr_feat = feat
        if self.smooth_feat is None:
            self.smooth_feat = feat
        else:
            self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
        self.features.append(feat)
        self.smooth_feat /= np.linalg.norm(self.smooth_feat)

    def predict(self):
        mean_state = self.mean.copy()
        if self.state != TrackState.Tracked:
            mean_state[7] = 0
        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)

    @staticmethod
    def multi_predict(stracks):
        if len(stracks) > 0:
            multi_mean = np.asarray([st.mean.copy() for st in stracks])
            multi_covariance = np.asarray([st.covariance for st in stracks])
            for i, st in enumerate(stracks):
                if st.state != TrackState.Tracked:
                    multi_mean[i][7] = 0
            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
                stracks[i].mean = mean
                stracks[i].covariance = cov

    def activate(self, kalman_filter, frame_id):
        """Start a new tracklet"""
        self.kalman_filter = kalman_filter
        self.track_id = self.next_id()
        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))

        self.tracklet_len = 0
        self.state = TrackState.Tracked
        # self.is_activated = True
        self.frame_id = frame_id
        self.start_frame = frame_id

    def re_activate(self, new_track, frame_id, new_id=False):
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
        )

        self.update_features(new_track.curr_feat)
        self.tracklet_len = 0
        self.state = TrackState.Tracked
        self.is_activated = True
        self.frame_id = frame_id
        if new_id:
            self.track_id = self.next_id()

    def update(self, new_track, frame_id, update_feature=True):
        """
        Update a matched track
        :type new_track: STrack
        :type frame_id: int
        :type update_feature: bool
        :return:
        """
        self.frame_id = frame_id
        self.tracklet_len += 1

        new_tlwh = new_track.tlwh
        self.mean, self.covariance = self.kalman_filter.update(
            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
        self.state = TrackState.Tracked
        self.is_activated = True

        self.score = new_track.score
        if update_feature:
            self.update_features(new_track.curr_feat)

    @property
    # @jit(nopython=True)
    def tlwh(self):
        """Get current position in bounding box format `(top left x, top left y,
        width, height)`.
        """
        if self.mean is None:
            return self._tlwh.copy()
        ret = self.mean[:4].copy()
        ret[2] *= ret[3]
        ret[:2] -= ret[2:] / 2
        return ret

    @property
    # @jit(nopython=True)
    def tlbr(self):
        """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
        `(top left, bottom right)`.
        """
        ret = self.tlwh.copy()
        ret[2:] += ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_xyah(tlwh):
        """Convert bounding box to format `(center x, center y, aspect ratio,
        height)`, where the aspect ratio is `width / height`.
        """
        ret = np.asarray(tlwh).copy()
        ret[:2] += ret[2:] / 2
        ret[2] /= ret[3]
        return ret

    def to_xyah(self):
        return self.tlwh_to_xyah(self.tlwh)

    @staticmethod
    # @jit(nopython=True)
    def tlbr_to_tlwh(tlbr):
        ret = np.asarray(tlbr).copy()
        ret[2:] -= ret[:2]
        return ret

    @staticmethod
    # @jit(nopython=True)
    def tlwh_to_tlbr(tlwh):
        ret = np.asarray(tlwh).copy()
        ret[2:] += ret[:2]
        return ret

    def __repr__(self):
        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)

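# Aside (illustrative, not from the original file): a runnable sketch of how
# the box-format helpers above compose; the numbers are made up.
def _example_box_roundtrip():
    tlwh = np.array([10., 20., 40., 80.])   # top-left x/y, width, height
    xyah = STrack.tlwh_to_xyah(tlwh)        # [30., 60., 0.5, 80.] (cx, cy, w/h, h)
    back = xyah.copy()
    back[2] *= back[3]                      # aspect ratio back to width
    back[:2] -= back[2:] / 2                # center back to top-left
    assert np.allclose(back, tlwh)
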
class JDETracker(object):
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if int(opt.gpus[0]) >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')

        ckpt = torch.load(opt.weights, map_location=opt.device)  # load checkpoint
        self.model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=1).to(opt.device)  # create
        exclude = ['anchor'] if opt.cfg else []  # exclude keys
        if type(ckpt['model']).__name__ == "OrderedDict":
            state_dict = ckpt['model']
        else:
            state_dict = ckpt['model'].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, self.model.state_dict(), exclude=exclude)  # intersect
        self.model.load_state_dict(state_dict, strict=False)  # load
        self.model.to(opt.device).eval()  # .cuda() here would fail on CPU-only machines
        total_params = sum(p.numel() for p in self.model.parameters())
        print(f'{total_params:,} total parameters.')

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
        self.low_thres = 0.2
        self.high_thres = self.opt.conf_thres + 0.1

    def update(self, im_blob, img0, seq_num, save_dir):
        self.frame_id += 1
        activated_starcks = []
        refind_stracks = []
        lost_stracks = []
        removed_stracks = []
        dets = []

        ''' Step 1: Network forward, get detections & embeddings'''
        with torch.no_grad():
            output = self.model(im_blob, augment=False)
            pred, train_out = output[1]

        pred = pred[pred[:, :, 4] > self.low_thres]
        detections = []
        if len(pred) > 0:
            dets, x_inds, y_inds = non_max_suppression_and_inds(
                pred[:, :6].unsqueeze(0), 0.1, self.opt.nms_thres, method='cluster_diou')
            if len(dets) != 0:
                scale_coords(self.opt.img_size, dets[:, :4], img0.shape).round()
                id_feature = output[0][0, y_inds, x_inds, :].cpu().numpy()

            # split detections into high- and low-confidence sets
            remain_inds = dets[:, 4] > self.opt.conf_thres
            inds_low = dets[:, 4] > self.low_thres
            inds_high = dets[:, 4] < self.opt.conf_thres
            inds_second = np.logical_and(inds_low, inds_high)
            dets_second = dets[inds_second]
            if id_feature.shape[0] == 1:
                id_feature_second = id_feature
            else:
                id_feature_second = id_feature[inds_second]
            dets = dets[remain_inds]
            id_feature = id_feature[remain_inds]

            detections = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
                          (tlbrs, f) in zip(dets[:, :5], id_feature)]
        else:
            detections = []
            dets_second = []
            id_feature_second = []

        ''' Add newly detected tracklets to tracked_stracks'''
        unconfirmed = []
        tracked_stracks = []  # type: list[STrack]
        for track in self.tracked_stracks:
            if not track.is_activated:
                unconfirmed.append(track)
            else:
                tracked_stracks.append(track)

        ''' Step 2: First association, with embedding'''
        strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
        # Predict the current location with KF
        # for strack in strack_pool:
        #     strack.predict()
        STrack.multi_predict(strack_pool)
        dists = matching.embedding_distance(strack_pool, detections)
        dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
        # dists = matching.iou_distance(strack_pool, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.4)

        for itracked, idet in matches:
            track = strack_pool[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(detections[idet], self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        # vis
        track_features, det_features, cost_matrix, cost_matrix_det, cost_matrix_track = [], [], [], [], []
        if self.opt.vis_state == 1 and self.frame_id % 20 == 0:
            if len(dets) != 0:
                for i in range(0, dets.shape[0]):
                    bbox = dets[i][0:4]
                    cv2.rectangle(img0, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 2)
            track_features, det_features, cost_matrix, cost_matrix_det, cost_matrix_track = matching.vis_id_feature_A_distance(strack_pool, detections)
            vis_feature(self.frame_id, seq_num, img0, track_features,
                        det_features, cost_matrix, cost_matrix_det, cost_matrix_track, max_num=5, out_path=save_dir)

        ''' Step 3: Second association, with IOU'''
        detections = [detections[i] for i in u_detection]
        r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
        dists = matching.iou_distance(r_tracked_stracks, detections)
        matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.5)

        for itracked, idet in matches:
            track = r_tracked_stracks[itracked]
            det = detections[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        # associate the remaining tracks with the low-score detections
        if len(dets_second) > 0:
            detections_second = [STrack(STrack.tlbr_to_tlwh(tlbrs[:4]), tlbrs[4], f, 30) for
                                 (tlbrs, f) in zip(dets_second[:, :5], id_feature_second)]
        else:
            detections_second = []
        second_tracked_stracks = [r_tracked_stracks[i] for i in u_track if r_tracked_stracks[i].state == TrackState.Tracked]
        dists = matching.iou_distance(second_tracked_stracks, detections_second)
        matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.4)
        for itracked, idet in matches:
            track = second_tracked_stracks[itracked]
            det = detections_second[idet]
            if track.state == TrackState.Tracked:
                track.update(det, self.frame_id)
                activated_starcks.append(track)
            else:
                track.re_activate(det, self.frame_id, new_id=False)
                refind_stracks.append(track)

        for it in u_track:
            track = second_tracked_stracks[it]
            if track.state != TrackState.Lost:
                track.mark_lost()
                lost_stracks.append(track)

        '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
        detections = [detections[i] for i in u_detection]
        dists = matching.iou_distance(unconfirmed, detections)
        matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
        for itracked, idet in matches:
            unconfirmed[itracked].update(detections[idet], self.frame_id)
            activated_starcks.append(unconfirmed[itracked])
        for it in u_unconfirmed:
            track = unconfirmed[it]
            track.mark_removed()
            removed_stracks.append(track)

        """ Step 4: Init new stracks"""
        for inew in u_detection:
            track = detections[inew]
            if track.score < self.high_thres:
                continue
            track.activate(self.kalman_filter, self.frame_id)
            activated_starcks.append(track)

        """ Step 5: Update state"""
        for track in self.lost_stracks:
            if self.frame_id - track.end_frame > self.max_time_lost:
                track.mark_removed()
                removed_stracks.append(track)

        # print('Remained match {} s'.format(t4-t3))

        self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
        self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)
        self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
        self.lost_stracks.extend(lost_stracks)
        self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
        self.removed_stracks.extend(removed_stracks)
        self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
        # get scores of lost tracks
        output_stracks = [track for track in self.tracked_stracks if track.is_activated]

        logger.debug('===========Frame {}=========='.format(self.frame_id))
        logger.debug('Activated: {}'.format([track.track_id for track in activated_starcks]))
        logger.debug('Refind: {}'.format([track.track_id for track in refind_stracks]))
        logger.debug('Lost: {}'.format([track.track_id for track in lost_stracks]))
        logger.debug('Removed: {}'.format([track.track_id for track in removed_stracks]))

        return output_stracks

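# Aside (illustrative, not from the original file): the detection split that
# drives the two-pass association in JDETracker.update() above. High-score
# boxes feed the first association; boxes between low_thres and conf_thres are
# kept for the BYTE-style rescue pass.
def _split_detections_sketch(scores, conf_thres=0.5, low_thres=0.2):
    # e.g. scores=[0.9, 0.35, 0.1] gives first=[T, F, F], second=[F, T, F]
    scores = np.asarray(scores)
    first = scores > conf_thres                             # first association
    second = (scores > low_thres) & (scores < conf_thres)   # second association
    return first, second
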
def joint_stracks(tlista, tlistb):
    exists = {}
    res = []
    for t in tlista:
        exists[t.track_id] = 1
        res.append(t)
    for t in tlistb:
        tid = t.track_id
        if not exists.get(tid, 0):
            exists[tid] = 1
            res.append(t)
    return res


def sub_stracks(tlista, tlistb):
    stracks = {}
    for t in tlista:
        stracks[t.track_id] = t
    for t in tlistb:
        tid = t.track_id
        if stracks.get(tid, 0):
            del stracks[tid]
    return list(stracks.values())


def remove_duplicate_stracks(stracksa, stracksb):
    pdist = matching.iou_distance(stracksa, stracksb)
    pairs = np.where(pdist < 0.15)
    dupa, dupb = list(), list()
    for p, q in zip(*pairs):
        timep = stracksa[p].frame_id - stracksa[p].start_frame
        timeq = stracksb[q].frame_id - stracksb[q].start_frame
        if timep > timeq:
            dupb.append(q)
        else:
            dupa.append(p)
    resa = [t for i, t in enumerate(stracksa) if i not in dupa]
    resb = [t for i, t in enumerate(stracksb) if i not in dupb]
    return resa, resb

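# Aside (illustrative, not from the original file): joint_stracks and
# sub_stracks treat track lists as ordered sets keyed by track_id.
def _strack_set_ops_sketch():
    from types import SimpleNamespace
    a = [SimpleNamespace(track_id=1), SimpleNamespace(track_id=2)]
    b = [SimpleNamespace(track_id=2), SimpleNamespace(track_id=3)]
    assert [t.track_id for t in joint_stracks(a, b)] == [1, 2, 3]  # union, a first
    assert [t.track_id for t in sub_stracks(a, b)] == [1]          # a minus b
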
def vis_feature(frame_id, seq_num, img, track_features, det_features, cost_matrix,
                cost_matrix_det, cost_matrix_track, max_num=5, out_path='/home/XX/'):
    num_zero = ["0000", "000", "00", "0"]
    img = cv2.resize(img, (778, 435))

    if len(det_features) != 0:
        # min-max normalize the detection embeddings to [0, 255] for display
        max_f = det_features.max()
        min_f = det_features.min()
        det_features = np.round((det_features - min_f) / (max_f - min_f) * 255)
        det_features = det_features.astype(np.uint8)
        d_F_M = []
        cutpff_line = [40] * 512
        for d_f in det_features:
            for row in range(45):
                d_F_M += [[40] * 3 + d_f.tolist() + [40] * 3]
            for row in range(3):
                d_F_M += [[40] * 3 + cutpff_line + [40] * 3]
        d_F_M = np.array(d_F_M)
        d_F_M = d_F_M.astype(np.uint8)
        det_features_img = cv2.applyColorMap(d_F_M, cv2.COLORMAP_JET)
        feature_img2 = cv2.resize(det_features_img, (435, 435))
        # cv2.putText(feature_img2, "det_features", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    else:
        feature_img2 = np.zeros((435, 435))
        feature_img2 = feature_img2.astype(np.uint8)
        feature_img2 = cv2.applyColorMap(feature_img2, cv2.COLORMAP_JET)
        # cv2.putText(feature_img2, "det_features", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    feature_img = np.concatenate((img, feature_img2), axis=1)

    if len(cost_matrix_det) != 0 and len(cost_matrix_det[0]) != 0:
        max_f = cost_matrix_det.max()
        min_f = cost_matrix_det.min()
        cost_matrix_det = np.round((cost_matrix_det - min_f) / (max_f - min_f) * 255)
        d_F_M = []
        cutpff_line = [40] * len(cost_matrix_det) * 10
        for c_m in cost_matrix_det:
            add = []
            for row in range(len(c_m)):
                add += [255 - c_m[row]] * 10
            for row in range(10):
                d_F_M += [[40] + add + [40]]
        d_F_M = np.array(d_F_M)
        d_F_M = d_F_M.astype(np.uint8)
        cost_matrix_det_img = cv2.applyColorMap(d_F_M, cv2.COLORMAP_JET)
        feature_img2 = cv2.resize(cost_matrix_det_img, (435, 435))
        # cv2.putText(feature_img2, "cost_matrix_det", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    else:
        feature_img2 = np.zeros((435, 435))
        feature_img2 = feature_img2.astype(np.uint8)
        feature_img2 = cv2.applyColorMap(feature_img2, cv2.COLORMAP_JET)
        # cv2.putText(feature_img2, "cost_matrix_det", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    feature_img = np.concatenate((feature_img, feature_img2), axis=1)

    if len(track_features) != 0:
        max_f = track_features.max()
        min_f = track_features.min()
        track_features = np.round((track_features - min_f) / (max_f - min_f) * 255)
        track_features = track_features.astype(np.uint8)
        d_F_M = []
        cutpff_line = [40] * 512
        for d_f in track_features:
            for row in range(45):
                d_F_M += [[40] * 3 + d_f.tolist() + [40] * 3]
            for row in range(3):
                d_F_M += [[40] * 3 + cutpff_line + [40] * 3]
        d_F_M = np.array(d_F_M)
        d_F_M = d_F_M.astype(np.uint8)
        track_features_img = cv2.applyColorMap(d_F_M, cv2.COLORMAP_JET)
        feature_img2 = cv2.resize(track_features_img, (435, 435))
        # cv2.putText(feature_img2, "track_features", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    else:
        feature_img2 = np.zeros((435, 435))
        feature_img2 = feature_img2.astype(np.uint8)
        feature_img2 = cv2.applyColorMap(feature_img2, cv2.COLORMAP_JET)
        # cv2.putText(feature_img2, "track_features", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    feature_img = np.concatenate((feature_img, feature_img2), axis=1)

    if len(cost_matrix_track) != 0 and len(cost_matrix_track[0]) != 0:
        max_f = cost_matrix_track.max()
        min_f = cost_matrix_track.min()
        cost_matrix_track = np.round((cost_matrix_track - min_f) / (max_f - min_f) * 255)
        d_F_M = []
        cutpff_line = [40] * len(cost_matrix_track) * 10
        for c_m in cost_matrix_track:
            add = []
            for row in range(len(c_m)):
                add += [255 - c_m[row]] * 10
            for row in range(10):
                d_F_M += [[40] + add + [40]]
        d_F_M = np.array(d_F_M)
        d_F_M = d_F_M.astype(np.uint8)
        cost_matrix_track_img = cv2.applyColorMap(d_F_M, cv2.COLORMAP_JET)
        feature_img2 = cv2.resize(cost_matrix_track_img, (435, 435))
        # cv2.putText(feature_img2, "cost_matrix_track", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    else:
        feature_img2 = np.zeros((435, 435))
        feature_img2 = feature_img2.astype(np.uint8)
        feature_img2 = cv2.applyColorMap(feature_img2, cv2.COLORMAP_JET)
        # cv2.putText(feature_img2, "cost_matrix_track", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    feature_img = np.concatenate((feature_img, feature_img2), axis=1)

    if len(cost_matrix) != 0 and len(cost_matrix[0]) != 0:
        max_f = cost_matrix.max()
        min_f = cost_matrix.min()
        cost_matrix = np.round((cost_matrix - min_f) / (max_f - min_f) * 255)
        d_F_M = []
        cutpff_line = [40] * len(cost_matrix[0]) * 10
        for c_m in cost_matrix:
            add = []
            for row in range(len(c_m)):
                add += [255 - c_m[row]] * 10
            for row in range(10):
                d_F_M += [[40] + add + [40]]
        d_F_M = np.array(d_F_M)
        d_F_M = d_F_M.astype(np.uint8)
        cost_matrix_img = cv2.applyColorMap(d_F_M, cv2.COLORMAP_JET)
        feature_img2 = cv2.resize(cost_matrix_img, (435, 435))
        # cv2.putText(feature_img2, "cost_matrix", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    else:
        feature_img2 = np.zeros((435, 435))
        feature_img2 = feature_img2.astype(np.uint8)
        feature_img2 = cv2.applyColorMap(feature_img2, cv2.COLORMAP_JET)
        # cv2.putText(feature_img2, "cost_matrix", (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    feature_img = np.concatenate((feature_img, feature_img2), axis=1)

    dst_path = out_path + "/" + seq_num + "_" + num_zero[len(str(frame_id)) - 1] + str(frame_id) + '.png'
    cv2.imwrite(dst_path, feature_img)
spaces/EPFL-VILAB/MultiMAE/utils/cross_entropy.py
DELETED
@@ -1,43 +0,0 @@
# --------------------------------------------------------
# Based on the timm code base
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------


""" Cross Entropy w/ smoothing or soft targets

Hacked together by / Copyright 2021 Ross Wightman
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class LabelSmoothingCrossEntropy(nn.Module):
    """ NLL loss with label smoothing.
    """

    def __init__(self, smoothing=0.1):
        super(LabelSmoothingCrossEntropy, self).__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1. - smoothing

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        logprobs = F.log_softmax(x, dim=-1)
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        smooth_loss = -logprobs.mean(dim=-1)
        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
        return loss.mean()


class SoftTargetCrossEntropy(nn.Module):

    def __init__(self):
        super(SoftTargetCrossEntropy, self).__init__()

    def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
        return loss.mean()
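
# Aside (illustrative, not part of the original file): with smoothing=0 the
# smoothed loss reduces to plain cross entropy, and SoftTargetCrossEntropy
# matches it on one-hot targets.
def _cross_entropy_sanity_check():
    logits = torch.randn(4, 10)
    target = torch.randint(0, 10, (4,))
    ce = F.cross_entropy(logits, target)
    ls = LabelSmoothingCrossEntropy(smoothing=0.0)(logits, target)
    soft = SoftTargetCrossEntropy()(logits, F.one_hot(target, 10).float())
    assert torch.allclose(ce, ls, atol=1e-6)
    assert torch.allclose(ce, soft, atol=1e-6)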