Commit ec31aaa
Parent(s): 8b62e96
Update parquet files (step 5 of 397)
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cisdem PDF Converter OCR 7.5.0 Crack MacOS MacOSX __FULL__.md +0 -26
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA Online 3 The Ultimate Online Football Experience.md +0 -27
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Ashampoo Burning Studio.md +0 -27
- spaces/1gistliPinn/ChatGPT4/Examples/Adobe Acrobat Pro DC 2019.008.20074 Activation .rar.md +0 -10
- spaces/1gistliPinn/ChatGPT4/Examples/Download Firebug For Firefox 16.0.2.md +0 -22
- spaces/1gistliPinn/ChatGPT4/Examples/Download __LINK__ Iron Man 2008 In Hindi.md +0 -39
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Askies by JazziDisciples and Mr JazziQ The Meaning Lyrics and Reviews of the Amapiano Smash Hit.md +0 -126
- spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r100.py +0 -26
- spaces/7eu7d7/anime-ai-detect-fucker/attacker/FGSM.py +0 -48
- spaces/7eu7d7/anime-ai-detect-fucker/attacker/base.py +0 -33
- spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Engineering Interviews 4be8039581d04456b0151f2cc4b22130/Questions ede8818b3a0e447f80145905690eb3f6/To Do List Design 9d9cb6c13b4b4a0a8f7ab03a8c98a2d8.md +0 -41
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/image_degradation/utils_image.py +0 -916
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/phi/m_bg.wasm.d.ts +0 -14
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/pie/Pie.js +0 -68
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/methods/CreateSwatch.js +0 -16
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetMaxChildWidth.js +0 -18
- spaces/Aiusernumber5/janitorai/README.md +0 -10
- spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/__init__.py +0 -60
- spaces/AlignmentResearch/tuned-lens/app.py +0 -117
- spaces/Aloento/9Nine-PITS/text/frontend/zh_frontend.py +0 -287
- spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/char_convert.py +0 -46
- spaces/An-619/FastSAM/utils/__init__.py +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint.py +0 -1138
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py +0 -5
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/constants.py +0 -2
- spaces/Arnx/MusicGenXvAKN/audiocraft/models/builders.py +0 -218
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/list.py +0 -365
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/__init__.py +0 -4
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/win32.py +0 -180
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/archive_util.py +0 -213
- spaces/BAAI/AltDiffusion-m9/share_btn.py +0 -60
- spaces/Bart92/RVC_HF/infer/modules/onnx/export.py +0 -52
- spaces/BetterAPI/BetterChat_new/src/lib/types/Message.ts +0 -5
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py +0 -49
- spaces/BigChungux/Pet_Survey/app.py +0 -172
- spaces/Boadiwaa/Recipes/openai/api_requestor.py +0 -365
- spaces/CK42/sentiment-model-comparison/app.py +0 -92
- spaces/CVPR/WALT/mmcv_custom/__init__.py +0 -5
- spaces/CVPR/WALT/mmdet/models/dense_heads/yolact_head.py +0 -943
- spaces/CVPR/WALT/mmdet/models/detectors/faster_rcnn.py +0 -24
- spaces/CVPR/regionclip-demo/detectron2/modeling/meta_arch/clip_rcnn.py +0 -1560
- spaces/ClearLove443/Robby-chatbot/setup.sh +0 -21
- spaces/CofAI/njpad/style.css +0 -28
- spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/api.py +0 -170
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/img.py +0 -521
- spaces/DHEIVER/Segmento_de_Angio_Coronariana_v6/obstruction_detector.py +0 -55
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/XbmImagePlugin.py +0 -94
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_v_h_e_a.py +0 -126
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/__init__.py +0 -0
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cisdem PDF Converter OCR 7.5.0 Crack MacOS MacOSX __FULL__.md
DELETED
@@ -1,26 +0,0 @@
-
-<h1>Cisdem PDF Converter OCR 7.5.0: A Powerful Tool to Convert and Edit PDFs on Mac</h1>
-<p>If you are looking for a reliable and versatile PDF converter for your Mac, you might want to check out Cisdem PDF Converter OCR 7.5.0. This software can help you convert any native PDF, scanned PDF, encrypted PDF, or image file to various editable and searchable formats, such as Word, Excel, PowerPoint, ePub, HTML, Text, RTF, Pages, Keynote, and images (JPEG, BMP, PNG, GIF, TIFF). It also supports OCR technology to recognize text in images and scanned documents, and allows you to customize your conversion with options specific to area, language, and output quality.</p>
-<p>In this article, we will review some of the key features and benefits of Cisdem PDF Converter OCR 7.5.0 for Mac users.</p>
-<h2>Cisdem PDF Converter OCR 7.5.0 Crack macOS MacOSX</h2><br /><p><b><b>DOWNLOAD</b> ✑ ✑ ✑ <a href="https://byltly.com/2uKzgx">https://byltly.com/2uKzgx</a></b></p><br /><br />
-<h2>Convert PDFs to Multiple Formats with High Accuracy</h2>
-<p>One of the main advantages of Cisdem PDF Converter OCR 7.5.0 is that it can handle various types of PDFs and images, and convert them to different formats according to your needs. Whether you want to edit a PDF document in Word or PowerPoint, create an e-book in ePub format, publish a PDF on the web as HTML, or extract data from a PDF table to Excel, you can do it easily with this software. You can also convert PDFs and images to iWork files (Pages, Keynote) for use in other office editor apps.</p>
-<p>Moreover, Cisdem PDF Converter OCR 7.5.0 can preserve the original layout and file quality of your source files after conversion. It has up to 99.8% character recognition accuracy and can retain the fonts, colors, graphics, tables, columns, and other elements of your documents. You can also adjust the output quality and size of your converted files according to your preferences.</p>
-<h2>Perform OCR on Scanned Documents and Images</h2>
-<p>Another useful feature of Cisdem PDF Converter OCR 7.5.0 is that it can perform optical character recognition (OCR) on scanned documents and images that contain text. This means that you can convert these files into editable and searchable formats as well, instead of having to retype or copy-paste the content manually.</p>
-<p>Cisdem PDF Converter OCR 7.5.0 supports 49 languages scanning and recognizing, including English, French, Italian, Chinese, etc., and also supports conversion of PDF files that contain multiple languages. You can also select the specific areas of your files that you want to convert with the four markup options: select, mark texts, mark images, and mark tables.</p>
-<h2>Create PDFs from Other Documents</h2>
-<p>Besides converting PDFs and images to other formats, Cisdem PDF Converter OCR 7.5.0 can also create PDFs from other documents such as Word, PowerPoint, HTML, ePub, etc. You can simply drag and drop your files into the software interface and choose the output format as PDF. You can also merge multiple files into one PDF document if you want.</p>
-<p>Cisdem PDF Converter OCR 7.5.0 can create high-quality PDFs that are compatible with various devices and platforms. You can also encrypt your PDFs with passwords and permissions to protect your sensitive information.</p>
-<h2>How to Download and Install Cisdem PDF Converter OCR 7.5.0 for Mac</h2>
-<p>If you are interested in trying out Cisdem PDF Converter OCR 7.5.0 for Mac, you can download it from the official website[^1^] or from other reputable sources[^2^] [^3^] [^4^]. The software is compatible with macOS 10.10 or later versions.</p>
-<p>To install Cisdem PDF Converter OCR 7.5.0 for Mac, you need to follow these steps:</p>
-<p></p>
-<ol>
-<li>Download the DMG file from the website.</li>
-<li>Double-click the DMG file to open it.</li>
-<li>Drag and drop the Cisdem PDF Converter OCR icon to the Applications folder.</li>
-<li>Launch the software from the Applications folder or the Launchpad.</li>
-<li>Enter your license code or start a free trial.</</p> cec2833e83<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA Online 3 The Ultimate Online Football Experience.md
DELETED
@@ -1,27 +0,0 @@
-<br />
-<h1>How to Download FIFA Online 3 and Play Football with Real Players</h1>
-<p>FIFA Online 3 is a free-to-play online football game that lets you manage your own team and compete with other players from around the world. You can choose from over 30 leagues and 15,000 real-world players to build your dream squad. Whether you want to play single-player through a season or challenge other online players in various modes, FIFA Online 3 has something for every football fan.</p>
-<h2>download fifa online 3</h2><br /><p><b><b>Download File</b> 🆗 <a href="https://byltly.com/2uKzdk">https://byltly.com/2uKzdk</a></b></p><br /><br />
-<p>In this article, we will show you how to download FIFA Online 3 and start playing right away.</p>
-<h2>Step 1: Visit the official website of FIFA Online 3</h2>
-<p>The first thing you need to do is to visit the official website of FIFA Online 3. Depending on your region, you may need to select a different website. For example, if you are in Asia, you can go to <a href="https://fo3.garena.com/">https://fo3.garena.com/</a>. If you are in Europe, you can go to <a href="https://www.ea.com/games/fifa/fifa-22">https://www.ea.com/games/fifa/fifa-22</a>.</p>
-<h2>Step 2: Register an account and download the game client</h2>
-<p>Once you are on the website, you will need to register an account and download the game client. You can either use your email address or your social media account to sign up. After that, you will be able to download the game client from the website. The file size is about 4 GB, so it may take some time depending on your internet speed.</p>
-<h2>Step 3: Install and launch the game</h2>
-<p>After downloading the game client, you will need to install and launch the game. Follow the instructions on the screen and agree to the terms and conditions. Once the installation is complete, you can launch the game from your desktop or start menu.</p>
-<h2>Step 4: Create your team and start playing</h2>
-<p>When you launch the game, you will be asked to create your team and choose your preferred league and players. You can customize your team name, logo, kit, formation, tactics, and more. You can also buy new players and items using the in-game currency called EP.</p>
-<p></p>
-<p>After creating your team, you can start playing the game. You can either play single-player through a season or play against other online players in various modes. Some of the modes include FIFA Online World Tour, League Mode, Tournament Mode, and more. You can also join a club and chat with other players.</p>
-<p>FIFA Online 3 is a fun and exciting online football game that lets you experience the thrill of managing and playing with your favorite team and players. If you are a fan of football, you should definitely give it a try.</p><h2>Step 5: Learn some tips and tricks to improve your game</h2>
-<p>If you want to improve your game and win more matches, you may want to learn some tips and tricks from the experts. Here are some of the things you can do to enhance your skills and strategies in FIFA Online 3.</p>
-<ul>
-<li>Practice your basic controls and moves. You can use the tutorial mode or the practice mode to learn how to pass, shoot, dribble, tackle, and more. You can also adjust the difficulty level and the game speed to suit your preference.</li>
-<li>Study your opponents and their tactics. You can use the scouting feature to see the stats and formation of your opponents before you play against them. You can also watch replays of their matches to analyze their strengths and weaknesses.</li>
-<li>Use the right players for the right positions. You can use the player search feature to find the best players for your team based on their attributes, skills, and ratings. You can also use the chemistry feature to see how well your players work together on the pitch.</li>
-<li>Upgrade your players and items. You can use the training feature to improve your players' abilities and potential. You can also use the upgrade feature to enhance your items' effects and durability.</li>
-<li>Join a club and cooperate with other players. You can join a club or create your own club and invite other players to join. You can chat with your club members, share tips and strategies, and play together in club matches and tournaments.</li>
-</ul>
-<p>By following these tips and tricks, you will be able to master FIFA Online 3 and become a champion.</p> ddb901b051<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Ashampoo Burning Studio.md
DELETED
@@ -1,27 +0,0 @@
-<br />
-<h1>Free Download Ashampoo Burning Studio: A Simple and Powerful CD/DVD Burner</h1>
-<p>If you are looking for a free and easy-to-use software to burn your data, music, videos, or backups to CD or DVD discs, you might want to try Ashampoo Burning Studio. This is a popular and feature-rich disc burning software that can handle all your disc burning needs with speed and convenience.</p>
-<p>In this article, we will show you how to free download Ashampoo Burning Studio and what you can do with it.</p>
-<h2>free download ashampoo burning studio</h2><br /><p><b><b>Download File</b> >>>>> <a href="https://byltly.com/2uKvAw">https://byltly.com/2uKvAw</a></b></p><br /><br />
-<h2>How to Free Download Ashampoo Burning Studio</h2>
-<p>To free download Ashampoo Burning Studio, you can visit the official website of Ashampoo or other trusted software download sites such as CNET or FileHippo. You will find a download button that will start the download process. The file size is about 60 MB and it will take a few minutes to complete the download depending on your internet speed.</p>
-<p>Once the download is finished, you can run the setup file and follow the instructions to install the software on your computer. The installation is quick and easy and you can customize some settings such as the language, the installation folder, and the desktop shortcut. You will also need to register the software for free with your email address to activate it.</p>
-<h2>What You Can Do with Ashampoo Burning Studio</h2>
-<p>Ashampoo Burning Studio is a versatile and powerful disc burning software that can do a lot of things for you. Here are some of the main features of Ashampoo Burning Studio:</p>
-<ul>
-<li><b>Burn data discs</b>: You can burn any files or folders to CD, DVD, or Blu-ray discs with ease. You can also update or erase existing discs if they are rewritable. You can choose from different file systems and settings to optimize your discs for different purposes.</li>
-<li><b>Create and burn backups</b>: You can create and burn compressed and password-protected backups of your important data to CD, DVD, or Blu-ray discs. You can also restore your backups from the discs with a few clicks. Ashampoo Burning Studio can also split large backups into multiple volumes if they don't fit on a single disc.</li>
-<li><b>Rip or create audio CDs</b>: You can rip audio CDs to MP3, WMA, or WAV files with automatic track recognition. You can also create your own audio CDs from various audio formats with built-in normalization and player. You can also create MP3 or WMA discs that can store more songs than traditional audio CDs.</li>
-<li><b>Burn movies</b>: You can burn HD and Full HD videos to CD, DVD, or Blu-ray discs as long as you have them in a prepared folder. You can also create video CDs (VCD) or super video CDs (SVCD) from standard videos. Ashampoo Burning Studio supports various video formats such as AVI, MP4, MKV, MOV, etc.</li>
-<li><b>Handle disc images</b>: You can create or burn disc images from data files in various formats such as ISO, CUE/BIN, or ASHDISC. You can also mount disc images as virtual drives and access them without burning them to discs.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>Ashampoo Burning Studio is a free and reliable disc burning software that can help you burn your data, music, videos, or backups to CD, DVD, or Blu-ray discs with ease. It has a simple and intuitive interface that makes it suitable for beginners and advanced users alike. It also has many features and options that let you customize your discs according to your needs.</p>
-<p>If you want to free download Ashampoo Burning Studio and try it out for yourself, you can visit the links below:</p>
-<p></p>
-<ul>
-<li><a href="https://www.ashampoo.com/en-us/burning-studio-free">Ashampoo Burning Studio Free - Free CD & DVD Burning Software</a></li>
-<li><a href="https://download.cnet.com/Ashampoo-Burning-Studio-Free/3000-2646_4-10776287.html">Ashampoo Burning Studio Free - Free download and software reviews - CNET Download</a></li>
-<li><a href="https://filehippo.com/download_ashampoo-burning-studio/">Ashampoo Burning Studio Free for Windows - FileHippo</</p> ddb901b051<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Adobe Acrobat Pro DC 2019.008.20074 Activation .rar.md
DELETED
@@ -1,10 +0,0 @@
-<h2>Adobe Acrobat Pro DC 2019.008.20074 Activation .rar</h2><br /><p><b><b>Download File</b> ✺ <a href="https://imgfil.com/2uxXU6">https://imgfil.com/2uxXU6</a></b></p><br /><br />
-
-How to activate Adobe Acrobat Pro DC 2019.008.20074? First you must turn on your computer. Then go to www.adobe.com/activate/acrobat/. You must enter your Adobe ID and then click on “Activate my Adobe Acrobat Professional 2019” button. Now you must enter the link you receive and click on “Activate Acrobat Professional 2019” button. How do I install Acrobat Pro DC 2019.008.20074 on my computer? You must download Acrobat pro DC 2019.008.20074 file from the link above and follow the instructions that come with the file to install Acrobat pro DC 2019.008.20074 on your PC. How to install Adobe Acrobat Pro DC 2019.008.20074 on my computer: Download file Adobe Acrobat Pro DC 2019.008.20074 from the link above. Then follow the instructions that come with the file to install Acrobat pro DC 2019.008.20074 on your computer. Have you just installed Adobe Acrobat pro DC 2019.008.20074? Did you get a message or need to enter a code? You can read the details on how to activate Acrobat pro DC 2019.008.20074. You need Adobe Acrobat Pro DC 2019.008.20074 activation code in order to activate your copy of Adobe Acrobat Pro DC 2019.008.20074. If you do not have the Adobe Acrobat Pro DC 2019 activation code you can download the.xml code files of Adobe Acrobat Pro DC 2019.008.20074 activation code from the link below.
-
-How to activate Adobe Acrobat Pro DC 2019?If you received a message saying “This software will activate in 5 minutes”, then you can assume that the Adobe Acrobat pro DC 2019 activation code has been sent to your e-mail address. It is necessary to activate Acrobat pro DC 2019 on your computer, in order to start using your copy of this software. You can read the details on how to activate Acrobat pro DC 2019. It is easy to activate Adobe Acrobat pro DC 2019 on your computer. However, you need to know the Adobe Acrobat pro DC 2019 activation code to activate your copy of Adobe Acrobat pro DC 2019. You can find out how to activate Acrobat pro DC 2019 at the end of this page.
-
-Adobe Acrobat Pro DC 2019.008.20074 Activation.rar ✌ . Adobe Acrobat Pro DC 4fefd39f24<br />
-<br />
-<br />
-<p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Firebug For Firefox 16.0.2.md
DELETED
@@ -1,22 +0,0 @@
-<br />
-<h1>How to Download Firebug for Firefox 16.0.2</h1>
-<p>Firebug is a web development tool that integrates with Firefox and allows you to edit, debug, and monitor CSS, HTML, and JavaScript live in any web page[^1^]. It is a useful tool for web developers who want to inspect and tweak the code of their websites.</p>
-<p>However, Firebug is no longer maintained and updated by its developers, and it is not compatible with the latest versions of Firefox. The last version of Firebug was 2.0.19, which was released in October 2016 and only works with Firefox versions up to 49[^2^]. If you are using Firefox 16.0.2, which was released in November 2012, you can still download and install Firebug 2.0.19 from the official website[^1^]. Here are the steps to do so:</p>
-<h2>download firebug for firefox 16.0.2</h2><br /><p><b><b>Download File</b> ⚹ <a href="https://imgfil.com/2uxYvS">https://imgfil.com/2uxYvS</a></b></p><br /><br />
-<ol>
-<li>Go to <a href="https://getfirebug.com/downloads">https://getfirebug.com/downloads</a> and click on the link for Firebug 2.0.19.</li>
-<li>Save the file firebug-2.0.19.xpi to your computer.</li>
-<li>Open Firefox 16.0.2 and go to the menu button (the three horizontal bars) and click on Add-ons.</li>
-<li>Click on the gear icon and select Install Add-on From File.</li>
-<li>Browse to the location where you saved firebug-2.0.19.xpi and select it.</li>
-<li>Click on Install Now and restart Firefox when prompted.</li>
-<li>Firebug should now be installed and you can access it by clicking on the Firebug icon (a bug with a flame) in the toolbar or by pressing F12.</li>
-</ol>
-<p>Note that Firebug is no longer supported by its developers and may not work properly with some websites or features. It is recommended that you switch to the latest version of Firefox and use the built-in developer tools instead[^1^]. You can also try other web development tools such as Chrome DevTools or Web Inspector.</p>
-
-<p>If you want to learn more about Firebug and how to use it for web development, you can check out the official website which has a lot of documentation, tutorials, and tips. You can also visit the Firebug blog to read about the latest news and updates on Firebug. You can also join the Firebug community on Google Groups, Stack Overflow, or Twitter to ask questions, share feedback, or report bugs.</p>
-<p>Firebug has been a pioneer and a leader in web development tools for many years, and it has helped millions of web developers create amazing websites. However, as the web evolves and new technologies emerge, Firebug has become outdated and incompatible with modern browsers. The Firebug team has decided to stop working on Firebug and focus on contributing to the Firefox developer tools instead. The Firefox developer tools are based on some of the features and concepts of Firebug, but they are more advanced, powerful, and integrated with Firefox. They offer a range of tools such as inspector, console, debugger, network monitor, performance analyzer, storage inspector, accessibility inspector, and more. You can access them by pressing Ctrl+Shift+I or by clicking on the menu button and selecting Web Developer.</p>
-<p>If you are still using Firefox 16.0.2 and Firebug 2.0.19, you may want to consider upgrading to the latest version of Firefox and switching to the Firefox developer tools. You will get a better browsing experience, more security and privacy, and more web development features. You can download the latest version of Firefox from <a href="https://www.mozilla.org/en-US/firefox/new/">https://www.mozilla.org/en-US/firefox/new/</a>. You can also learn more about the Firefox developer tools from <a href="https://developer.mozilla.org/en-US/docs/Tools">https://developer.mozilla.org/en-US/docs/Tools</a>.</p>
-<p></p> d5da3c52bf<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/Download __LINK__ Iron Man 2008 In Hindi.md
DELETED
@@ -1,39 +0,0 @@
-<br />
-<h1>How to Download Iron Man (2008) Movie in Hindi-Eng</h1>
-<p>Iron Man (2008) is a Hollywood movie based on Action, Adventure, and Science Fiction. It stars Robert Downey Jr. as Tony Stark, a billionaire engineer who creates a unique weaponized suit of armor to fight evil after being held captive in an Afghan cave. The movie was directed by Jon Favreau and received positive reviews from critics and audiences alike.</p>
-<p>If you want to download Iron Man (2008) movie in Hindi-Eng, you have several options to choose from. You can either use a torrent site, a streaming site, or an archive site. Here are some of the best sources to download Iron Man (2008) movie in Hindi-Eng:</p>
-<h2>download iron man 2008 in hindi</h2><br /><p><b><b>Download Zip</b> ✑ <a href="https://imgfil.com/2uy1Go">https://imgfil.com/2uy1Go</a></b></p><br /><br />
-<ul>
-<li><strong>Torrent site:</strong> You can use a torrent site like The Pirate Bay or 1337x to download Iron Man (2008) movie in Hindi-Eng. You will need a torrent client like uTorrent or BitTorrent to download the movie file. You can also use a VPN service to hide your IP address and avoid any legal issues. Some of the torrent links for Iron Man (2008) movie in Hindi-Eng are:
-
-<ol>
-<li>Iron Man 720p ( Hindi) : Free Download, Borrow, and Streaming : Internet Archive[^1^]</li>
-<li>Download Iron Man (2008) Movie | Hindi-Eng | PogoLinks[^2^]</li>
-<li>Iron Man (2008) Dual Audio Hindi-English 480p [375MB ... - Mkvhub[^3^]</li>
-</ol>
-</li>
-
-<li><strong>Streaming site:</strong> You can use a streaming site like PogoLinks or Mkvhub to watch Iron Man (2008) movie in Hindi-Eng online. You will need a good internet connection and a compatible device to stream the movie. You can also download the movie from these sites if you want. Some of the streaming links for Iron Man (2008) movie in Hindi-Eng are:
-
-<ol>
-<li>Download Iron Man (2008) Movie | Hindi-Eng | PogoLinks[^2^]</li>
-<li>Iron Man (2008) Dual Audio Hindi-English 480p [375MB ... - Mkvhub[^3^]</li>
-</ol>
-</li>
-
-<li><strong>Archive site:</strong> You can use an archive site like Internet Archive or Archive.org to download Iron Man (2008) movie in Hindi-Eng. These sites store old and rare movies that are not available elsewhere. You can also watch the movie online on these sites if you want. Some of the archive links for Iron Man (2008) movie in Hindi-Eng are:
-
-<ol>
-<li>Iron Man 720p ( Hindi) : Free Download, Borrow, and Streaming : Internet Archive[^1^]</li>
-<li>Iron Man 2008 Dual Audio Hindi Dubbed 480p 300mb Download - aFilmywap[^4^]</li>
-</ol>
-</li>
-</ul>
-
-<p>We hope this article helped you find the best source to download Iron Man (2008) movie in Hindi-Eng. Enjoy watching the movie and let us know your feedback in the comments below.</p>
-
-<p>Iron Man (2008) is the first movie in the Marvel Cinematic Universe (MCU), a series of superhero movies based on the Marvel Comics characters. The movie was a huge success at the box office and received several awards and nominations, including two Academy Award nominations for Best Sound Editing and Best Visual Effects. The movie also spawned two sequels, Iron Man 2 (2010) and Iron Man 3 (2013), and several crossover movies with other MCU characters.</p>
-<p>The movie follows the story of Tony Stark, a genius inventor and CEO of Stark Industries, a leading weapons manufacturer. While demonstrating his latest missile in Afghanistan, he is captured by a terrorist group called the Ten Rings, who force him to build a weapon for them. Instead, he secretly builds a miniaturized arc reactor to power his heart and a suit of armor to escape. He then returns to America and announces that he will stop making weapons and use his technology for good. However, he faces opposition from his business partner Obadiah Stane, who wants to use his arc reactor for his own nefarious purposes.</p>
-<p>Iron Man (2008) is a movie that combines action, humor, and drama in a thrilling and entertaining way. The movie showcases the origin story of one of the most popular and iconic superheroes of all time. The movie also features an impressive cast of actors, including Gwyneth Paltrow as Pepper Potts, Jeff Bridges as Obadiah Stane, Terrence Howard as James Rhodes, and Samuel L. Jackson as Nick Fury. The movie also has a memorable soundtrack composed by Ramin Djawadi and featuring songs by AC/DC, Black Sabbath, and Audioslave.</p> d5da3c52bf<br />
-<br />
-<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Askies by JazziDisciples and Mr JazziQ The Meaning Lyrics and Reviews of the Amapiano Smash Hit.md
DELETED
@@ -1,126 +0,0 @@
-
-<h1>Download Askies Jazzidisciples: How to Enjoy the Best of Amapiano Music Online</h1>
-<p>If you are a fan of amapiano music, you have probably heard of askies jazzidisciples, one of the most popular songs in this genre. But do you know what is askies jazzidisciples, who are the artists behind it, and why it is so popular among amapiano fans? In this article, we will answer these questions and show you how to download askies jazzidisciples for free online or stream it on various platforms.</p>
-<h2>What is Askies Jazzidisciples?</h2>
-<p>Askies jazzidisciples is a song by Mr JazziQ and JazziDisciples, featuring Josiah De Disciple, FakeLove, Moonchild Sanelly, and MDU aka TRP. It was released in March 2020 as part of Mr JazziQ's debut album Mr JazziQ 0303. The song is a fusion of amapiano, house, kwaito, and gqom elements, creating a unique sound that appeals to a wide audience.</p>
-<h2>download askies jazzidisciples</h2><br /><p><b><b>Download</b> ✶✶✶ <a href="https://urlin.us/2uSVDH">https://urlin.us/2uSVDH</a></b></p><br /><br /> <h2>Who are the Artists Behind Askies Jazzidisciples?</h2>
-<p>Askies jazzidisciples is not only a catchy song, but also a showcase of some of the most talented artists in the amapiano scene. Let's take a look at who they are and what they bring to the table.</p>
-<h3>Mr JazziQ and JazziDisciples</h3>
-<p>Mr JazziQ, whose real name is Tumelo Manyoni, is a South African amapiano DJ and record producer. He is best known for being a former member of the amapiano DJ duo, JazziDisciples, alongside Josiah De Disciple. The duo started their career in 2018 and released several projects that found their footing on the dancefloor and on streaming platforms, such as The Load Shedding EP, IOP EP, Disciples Of Piano, and 0303. They also collaborated with other amapiano artists like Vigro Deep, Mdu aka TRP, and Kabza De Small. </p>
-<p>In 2020, Mr JazziQ and Josiah De Disciple decided to split and focus on their individual music careers. Mr JazziQ released his first solo debut album 0303 in March 2020, which featured askies jazzidisciples as well as other hits like Blue Skies, Hello Mo'Girl, and VSOP. His single Askies, which featured singer Moonchild Sanelly and FakeLove, was certified gold by RiSA. He also released other successful singles and albums, such as Umsebenzi Wethu, Amaneighbour, Woza, Party With The English, and All You Need Is Piano. </p>
-<h3>Josiah De Disciple</h3>
-<p>Josiah De Disciple, whose real name is Josiah Makoela, is a South African DJ and record producer who was also part of the JazziDisciples duo. He started his DJ career at the age of 14 and producing at the age of 16. He met Mr JazziQ in Alexandra and they formed a partnership that lasted for two years. </p>
-<p>After going solo in 2020, Josiah De Disciple released his debut studio album Spirits of Makoela – Vol. 2: The Reintroduction in April 2021. The album featured 14 tracks with guest appearances from Kabza De Small, Boohle, Cecil M, Jessica LM, and others. The album received positive reviews from critics and fans who praised its blend of amapiano with jazz and soul elements. Josiah De Disciple also collaborated with Boohle on another album called Umbuso Wabam'nyama in 2020, which included the gold-certified single Mama. </p>
-<h3>FakeLove</h3>
-<p>FakeLove is a South African singer and songwriter who is known for his smooth vocals and catchy hooks. He is influenced by various genres of music such as R&B, soul, pop, hip hop, and amapiano. He has worked with several prominent artists in the industry such as Mr JazziQ, Kabza De Small, DJ Maphorisa, Sha Sha, Samthing Soweto, MFR Souls, and more. </p>
-<p>Some of his notable songs include Askies with Mr JazziQ and Moonchild Sanelly, Nguwe with DJ Maphorisa and Kabza De Small, Banyana with DJ Maphorisa and Tyler ICU, Ntyilo Ntyilo with Rethabile Khumalo, Mali Mali with MFR Souls and Focalistic, and many more. He is also part of the Scorpion Kings Live project by DJ Maphorisa and Kabza De Small. </p>
-<p>download askies jazzidisciples mp3<br />
-download askies jazzidisciples fakaza<br />
-download askies jazzidisciples lyrics<br />
-download askies jazzidisciples video<br />
-download askies jazzidisciples song<br />
-download askies jazzidisciples audio<br />
-download askies jazzidisciples youtube<br />
-download askies jazzidisciples spotify<br />
-download askies jazzidisciples feat moonchild sanelly<br />
-download askies jazzidisciples feat josiah de disciple<br />
-download askies jazzidisciples feat mdu aka trp<br />
-download askies jazzidisciples feat fakelove<br />
-download askies jazzidisciples amapiano<br />
-download askies jazzidisciples remix<br />
-download askies jazzidisciples instrumental<br />
-download askies jazzidisciples original mix<br />
-download askies jazzidisciples free mp3<br />
-download askies jazzidisciples 320kbps<br />
-download askies jazzidisciples zip file<br />
-download askies jazzidisciples album mr jazziq 0303<br />
-download askies jazzidisciples online<br />
-download askies jazzidisciples on fakaza.com<br />
-download askies jazzidisciples on zamusic.org<br />
-download askies jazzidisciples on datafilehost.com<br />
-download askies jazzidisciples on hiphopza.com<br />
-how to download askies jazzidisciples<br />
-where to download askies jazzidisciples<br />
-best site to download askies jazzidisciples<br />
-best quality to download askies jazzidisciples<br />
-best app to download askies jazzidisciples<br />
-stream or download askies jazzidisciples<br />
-listen or download askies jazzidisciples<br />
-play or download askies jazzidisciples<br />
-share or download askies jazzidisciples<br />
-rate or download askies jazzidisciples<br />
-review or download askies jazzidisciples<br />
-comment or download askies jazzidisciples<br />
-like or download askies jazzidisciples<br />
-subscribe or download askies jazzidisciples<br />
-follow or download askies jazzidisciples</p>
-<h3>Moonchild Sanelly</h3>
-<p>Moonchild Sanelly is a South African musician and dancer who is known for her signature blue-colored hair and her self-created music genre called \"Future ghetto punk\". She was born into a musical family in Port Elizabeth and moved to Durban in 2005 to study fashion. She started performing in shows at Durban University of Technology with a focus on poetry and hip hop. She later moved to Johannesburg to pursue her musical career. </p>
-<p>Moonchild Sanelly has a unique style that combines elements of kwaito, house, dancehall, funk, electronic, R&B, soul, amapiano, and more. She has collaborated with various local and international artists such as Busiswa, Die Antwoord, Beyoncé , Gorillaz , Diplo, and more. </p>
-<p>Some of her popular songs include Bashiri, Thunda Thighs, Where De Dee Kat, F-Boyz, Newtown Chips, Askies, and many more. She also has her own reality show on MTV Africa called Moonchild Sanelly Woza. </p>
-<h3>MDU aka TRP</h3>
-<p>MDU aka TRP is a South African amapiano DJ and record producer who is known for his versatile and innovative sound. He started making music at the age of 13 and was inspired by artists like Black Coffee, DJ Fresh, and Oskido. He has worked with several amapiano heavyweights such as Kabza De Small, DJ Maphorisa, Mr JazziQ, JazziDisciples, MFR Souls, and more. </p>
-<p>Some of his notable songs include Askies with Mr JazziQ and JazziDisciples, Banyana with DJ Maphorisa and Tyler ICU, Sgubu Se Monati with JazziDisciples and Vigro Deep, Sabanika with Njelic and De Mthuda, 16 Inch with Bongza and Daliwonga, and many more. He also released several EPs and albums such as Pull Up 2, Amapiano Is A Lifestyle Vol. 2, Tales Of The 2 Peers, Boomerang, and more. </p>
-<h2>Why is Askies Jazzidisciples Popular Among Amapiano Fans?</h2>
-<p>Askies jazzidisciples is not only a song that features some of the best artists in the amapiano scene, but also a song that captures the essence of amapiano music. Amapiano is a genre of music that originated in South Africa in the early 2010s and has since become a global phenomenon. It is characterized by its use of piano melodies, basslines, drums, percussions, synths, vocals, and samples from various genres such as house, kwaito, jazz, soul, and more. </p>
-<p>Askies jazzidisciples is a song that showcases the diversity and creativity of amapiano music. It has a catchy chorus that repeats the word \"askies\", which means \"sorry\" or \"excuse me\" in South African slang. It also has a groovy beat that makes you want to dance along. It features different vocal styles from the artists, such as Moonchild Sanelly's energetic rap verses, FakeLove's smooth singing hooks, Josiah De Disciple's soulful harmonies, and MDU aka TRP's signature ad-libs. It also incorporates elements from other genres such as house, gqom, kwaito, and jazz. </p>
-<p>Askies jazzidisciples is a song that appeals to amapiano fans because it represents the culture and lifestyle of amapiano lovers. It is a song that celebrates the joy of music, dancing, partying, and having fun with friends. It is a song that expresses the attitude and spirit of amapiano fans who are not afraid to say \"askies\" to anyone who tries to stop them from enjoying their lives. </p>
-<h2>How to Download Askies Jazzidisciples for Free Online?</h2>
-<p>If you want to download askies jazzidisciples for free online, you have several options to choose from. Here are some of the best ways to download askies jazzidisciples for free online.</p>
-<h3>Use OKmusi MP3 Downloader</h3>
-<p>OKmusi MP3 Downloader is a free online tool that allows you to download any MP3 song from YouTube or other websites. You can use it to download askies jazzidisciples for free online by following these steps:</p>
-<ol>
-<li>Go to <a href="">https://okmusi.com/</a>.</li>
-<li>Type \"askies jazzidisciples\" in the search box and click on the search icon.</li>
-<li>Select the song from the list of results and click on the download button.</li>
-<li>Choose the quality and format you want and click on the download button again.</li>
-<li>Wait for the download to finish and enjoy your song.</li>
-</ol>
-<p><img src="" alt="OKmusi MP3 Downloader screenshot"></p>
-<h3>Use Bandcamp</h3>
-<p>Bandcamp is an online platform that allows independent artists to sell their music directly to fans. You can use it to download askies jazzidisciples for free online by following these steps:</p>
-<ol>
-<li>Go to <a href=" can use it to stream askies jazzidisciples online by following these steps:</p>
-<ol>
-<li>Download and install Apple Music on your device from <a href="">https://www.apple.com/apple-music/</a>.</li>
-<li>Open the app and sign in with your Apple ID or create a new one.</li>
-<li>Tap on the search icon and type \"askies jazzidisciples\" in the search box.</li>
-<li>Select the song from the list of results and tap on the play button.</li>
-<li>Enjoy your song.</li>
-</ol>
-<p><img src="" alt="Apple Music screenshot"></p>
-<h3>Use Spotify</h3>
-<p>Spotify is a streaming service that allows you to access millions of songs, podcasts, playlists, and more. You can use it to stream askies jazzidisciples online by following these steps:</p>
-<ol>
-<li>Download and install Spotify on your device from <a href="">https://www.spotify.com/download/</a>.</li>
-<li>Open the app and sign in with your Spotify account or create a new one.</li>
-<li>Tap on the search icon and type \"askies jazzidisciples\" in the search box.</li>
-<li>Select the song from the list of results and tap on the play button.</li>
-<li>Enjoy your song.</li>
-</ol>
-<p><img src="" alt="Spotify screenshot"></p>
-<h2>Conclusion</h2>
-<p>Askies jazzidisciples is a song that you should not miss if you are a fan of amapiano music. It is a song that features some of the best artists in the amapiano scene, such as Mr JazziQ, JazziDisciples, Josiah De Disciple, FakeLove, Moonchild Sanelly, and MDU aka TRP. It is a song that showcases the diversity and creativity of amapiano music, blending elements from various genres such as house, kwaito, gqom, and jazz. It is a song that appeals to amapiano fans because it represents the culture and lifestyle of amapiano lovers, celebrating the joy of music, dancing, partying, and having fun with friends.</p>
-<p>If you want to download askies jazzidisciples for free online or stream it on various platforms, you have several options to choose from. You can use OKmusi MP3 Downloader, Bandcamp, or DatPiff to download askies jazzidisciples for free online. You can also use Shazam, Apple Music, or Spotify to stream askies jazzidisciples online. Whichever option you choose, you will be able to enjoy this amazing song anytime and anywhere.</p>
-<p>We hope this article has helped you learn more about askies jazzidisciples and how to enjoy it online. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy listening!</p>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions related to askies jazzidisciples and their answers.</p>
-<h4>What does askies mean?</h4>
-<p>Askies is a South African slang word that means \"sorry\" or \"excuse me\". It is often used as a polite way of apologizing or getting someone's attention. In the context of the song askies jazzidisciples, it is also used as a way of expressing one's attitude and spirit of having fun and not caring about what others think.</p>
-<h4>Who produced askies jazzidisciples?</h4>
-<p>Askies jazzidisciples was produced by Mr JazziQ and JazziDisciples, who are both amapiano DJs and record producers. They are also former members of the amapiano duo JazziDisciples, which split in 2020.</p>
-<h4>Where can I find the lyrics of askies jazzidisciples?</h4>
-<p>You can find the lyrics of askies jazzidisciples on various websites such as Genius <a href="">https://genius.com/Mr-jazziq-askies-lyrics</a>, Musixmatch <a href="">https://www.musixmatch.com/lyrics/Mr-JazziQ-feat-Josiah-De-Disciple-FakeLove-Moonchild-Sanelly-MDU-aka-TRP/Askies</a>, or Lyrics Translate <a href="">https://lyricstranslate.com/en/askies-sorry.html</a>.</p>
-<h4>How popular is askies jazzidisciples?</h4>
-<p>Askies jazzidisciples is one of the most popular songs in the amapiano genre. It has over 1.5 million views on YouTube <a href="">https://www.youtube.com/watch?v=Z0kuBZW4o5I</a>, over 2.7 million streams on Spotify <a href="">https://open.spotify.com/track/6w1y3fQ8dQgXbqZl1HvYmM</a>, and over 3.6 million streams on Apple Music <a href="">https://music.apple.com/za/album/askies-feat-josiah-de-disciple-fakelove-moonchild-sanelly/1501642210?i=1501642216</a>. It also received positive reviews from critics and fans who praised its catchy lyrics, infectious beat, diverse vocals, and danceable vibe.</p>
-<h4>What are some other songs similar to askies jazzidisciples?</h4>
-<p>If you like askies jazzidisciples, you might also like some other songs similar to it, such as:</p>
-<ul>
-<li>Blue Skies by Mr JazziQ and JazziDisciples, featuring Vigro Deep and Rams De Violinist</li>
-<li>VSOP by Mr JazziQ, featuring Busta 929, Reece Madlisa, Zuma, Mpura, Riky Rick, and 9umba</li>
-<li>Mama by Boohle and Josiah De Disciple</li>
-<li>Ntyilo Ntyilo by Rethabile Khumalo and Master KG</li>
-<li>Banyana by DJ Maphorisa, Tyler ICU, Sir Trill, Daliwonga, and Kabza De Small</li>
-</ul></p> 197e85843d<br />
-<br />
-<br />
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/configs/glint360k_r100.py
DELETED
@@ -1,26 +0,0 @@
-from easydict import EasyDict as edict
-
-# make training faster
-# our RAM is 256G
-# mount -t tmpfs -o size=140G tmpfs /train_tmp
-
-config = edict()
-config.loss = "cosface"
-config.network = "r100"
-config.resume = False
-config.output = None
-config.embedding_size = 512
-config.sample_rate = 1.0
-config.fp16 = True
-config.momentum = 0.9
-config.weight_decay = 5e-4
-config.batch_size = 128
-config.lr = 0.1  # batch size is 512
-
-config.rec = "/train_tmp/glint360k"
-config.num_classes = 360232
-config.num_image = 17091657
-config.num_epoch = 20
-config.warmup_epoch = -1
-config.decay_epoch = [8, 12, 15, 18]
-config.val_targets = ["lfw", "cfp_fp", "agedb_30"]
spaces/7eu7d7/anime-ai-detect-fucker/attacker/FGSM.py
DELETED
@@ -1,48 +0,0 @@
-import torch
-from torch import nn
-from copy import deepcopy
-from .base import Attacker
-from torch.cuda import amp
-
-class FGSM(Attacker):
-    def __init__(self, model, img_transform=(lambda x:x, lambda x:x), use_amp=False):
-        super().__init__(model, img_transform)
-        self.use_amp=use_amp
-
-        if use_amp:
-            self.scaler = amp.GradScaler()
-
-    def set_para(self, eps=8, alpha=lambda:8, **kwargs):
-        super().set_para(eps=eps, alpha=alpha, **kwargs)
-
-    def step(self, images, labels, loss):
-        with amp.autocast(enabled=self.use_amp):
-            images.requires_grad = True
-            outputs = self.model(images).logits
-
-            self.model.zero_grad()
-            cost = loss(outputs, labels)
-
-        if self.use_amp:
-            self.scaler.scale(cost).backward()
-        else:
-            cost.backward()
-
-        adv_images = (images + self.alpha() * images.grad.sign()).detach_()
-        eta = torch.clamp(adv_images - self.ori_images, min=-self.eps, max=self.eps)
-        images = self.img_transform[0](torch.clamp(self.img_transform[1](self.ori_images + eta), min=0, max=255).detach_())
-
-        return images
-
-    def attack(self, images, labels):
-        #images = deepcopy(images)
-        #self.ori_images = deepcopy(images)
-
-        self.model.eval()
-
-        images = self.forward(self, images, labels)
-
-        self.model.zero_grad()
-        self.model.train()
-
-        return images
spaces/7eu7d7/anime-ai-detect-fucker/attacker/base.py
DELETED
@@ -1,33 +0,0 @@
-
-class Attacker:
-    def __init__(self, model, img_transform=(lambda x:x, lambda x:x)):
-        self.model = model # 必须是pytorch的model
-        '''self.model.eval()
-        for k, v in self.model.named_parameters():
-            v.requires_grad = False'''
-        self.img_transform=img_transform
-        self.forward = lambda attacker, images, labels: attacker.step(images, labels, attacker.loss)
-
-    def set_para(self, **kwargs):
-        for k,v in kwargs.items():
-            setattr(self, k,v)
-
-    def set_forward(self, forward):
-        self.forward=forward
-
-    def step(self, images, labels, loss):
-        pass
-
-    def set_loss(self, loss):
-        self.loss=loss
-
-    def attack(self, images, labels):
-        pass
-
-
-class Empty:
-    def __enter__(self):
-        pass
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        pass
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/Engineering Interviews 4be8039581d04456b0151f2cc4b22130/Questions ede8818b3a0e447f80145905690eb3f6/To Do List Design 9d9cb6c13b4b4a0a8f7ab03a8c98a2d8.md
DELETED
@@ -1,41 +0,0 @@
-# To Do List Design
-
-Difficulty: Medium
-Skills: Architecture, Backend, Front end
-
-# Description
-
-Write a description for the interview question here.
-
-# Sample Inputs
-
-Give some valid inputs the candidate can expect to test their solution with.
-
-- ...
-- ...
-
-# Expected Outputs
-
-For each sample input above, list the expected output.
-
-- ...
-- ...
-
-# Solutions
-
-Provide possible solutions in common languages to this problem.
-
-### Javascript
-
-```jsx
-function solution() {
-
-}
-```
-
-### Python
-
-```python
-def solution():
-    pass
-```
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/image_degradation/utils_image.py
DELETED
@@ -1,916 +0,0 @@
-import os
-import math
-import random
-import numpy as np
-import torch
-import cv2
-from torchvision.utils import make_grid
-from datetime import datetime
-#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
-
-
-os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
-
-'''
-# --------------------------------------------
-# Kai Zhang (github: https://github.com/cszn)
-# 03/Mar/2019
-# --------------------------------------------
-# https://github.com/twhui/SRGAN-pyTorch
-# https://github.com/xinntao/BasicSR
-# --------------------------------------------
-'''
-
-
-IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
-
-
-def is_image_file(filename):
-    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def get_timestamp():
-    return datetime.now().strftime('%y%m%d-%H%M%S')
-
-
-def imshow(x, title=None, cbar=False, figsize=None):
-    plt.figure(figsize=figsize)
-    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
-    if title:
-        plt.title(title)
-    if cbar:
-        plt.colorbar()
-    plt.show()
-
-
-def surf(Z, cmap='rainbow', figsize=None):
-    plt.figure(figsize=figsize)
-    ax3 = plt.axes(projection='3d')
-
-    w, h = Z.shape[:2]
-    xx = np.arange(0,w,1)
-    yy = np.arange(0,h,1)
-    X, Y = np.meshgrid(xx, yy)
-    ax3.plot_surface(X,Y,Z,cmap=cmap)
-    #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
-    plt.show()
-
-
-'''
-# --------------------------------------------
-# get image pathes
-# --------------------------------------------
-'''
-
-
-def get_image_paths(dataroot):
-    paths = None # return None if dataroot is None
-    if dataroot is not None:
-        paths = sorted(_get_paths_from_images(dataroot))
-    return paths
-
-
-def _get_paths_from_images(path):
-    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
-    images = []
-    for dirpath, _, fnames in sorted(os.walk(path)):
-        for fname in sorted(fnames):
-            if is_image_file(fname):
-                img_path = os.path.join(dirpath, fname)
-                images.append(img_path)
-    assert images, '{:s} has no valid image file'.format(path)
-    return images
-
-
-'''
-# --------------------------------------------
-# split large images into small images
-# --------------------------------------------
-'''
-
-
-def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
-    w, h = img.shape[:2]
-    patches = []
-    if w > p_max and h > p_max:
-        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int))
-        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int))
-        w1.append(w-p_size)
-        h1.append(h-p_size)
-        # print(w1)
-        # print(h1)
-        for i in w1:
-            for j in h1:
-                patches.append(img[i:i+p_size, j:j+p_size,:])
-    else:
-        patches.append(img)
-
-    return patches
-
-
-def imssave(imgs, img_path):
-    """
-    imgs: list, N images of size WxHxC
-    """
-    img_name, ext = os.path.splitext(os.path.basename(img_path))
-
-    for i, img in enumerate(imgs):
-        if img.ndim == 3:
-            img = img[:, :, [2, 1, 0]]
-        new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
-        cv2.imwrite(new_path, img)
-
-
-def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
-    """
-    split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
-    and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
-    will be splitted.
-    Args:
-        original_dataroot:
-        taget_dataroot:
-        p_size: size of small images
-        p_overlap: patch size in training is a good choice
-        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
-    """
-    paths = get_image_paths(original_dataroot)
-    for img_path in paths:
-        # img_name, ext = os.path.splitext(os.path.basename(img_path))
-        img = imread_uint(img_path, n_channels=n_channels)
-        patches = patches_from_image(img, p_size, p_overlap, p_max)
-        imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path)))
-        #if original_dataroot == taget_dataroot:
-        #del img_path
-
-'''
-# --------------------------------------------
-# makedir
-# --------------------------------------------
-'''
-
-
-def mkdir(path):
-    if not os.path.exists(path):
-        os.makedirs(path)
-
-
-def mkdirs(paths):
-    if isinstance(paths, str):
-        mkdir(paths)
-    else:
-        for path in paths:
-            mkdir(path)
-
-
-def mkdir_and_rename(path):
-    if os.path.exists(path):
-        new_name = path + '_archived_' + get_timestamp()
-        print('Path already exists. Rename it to [{:s}]'.format(new_name))
-        os.rename(path, new_name)
-    os.makedirs(path)
-
-
-'''
-# --------------------------------------------
-# read image from path
-# opencv is fast, but read BGR numpy image
-# --------------------------------------------
-'''
-
-
-# --------------------------------------------
-# get uint8 image of size HxWxn_channles (RGB)
-# --------------------------------------------
-def imread_uint(path, n_channels=3):
-    # input: path
-    # output: HxWx3(RGB or GGG), or HxWx1 (G)
-    if n_channels == 1:
-        img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE
-        img = np.expand_dims(img, axis=2) # HxWx1
-    elif n_channels == 3:
-        img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G
-        if img.ndim == 2:
-            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG
-        else:
-            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB
-    return img
-
-
-# --------------------------------------------
-# matlab's imwrite
-# --------------------------------------------
-def imsave(img, img_path):
-    img = np.squeeze(img)
-    if img.ndim == 3:
-        img = img[:, :, [2, 1, 0]]
-    cv2.imwrite(img_path, img)
|
208 |
-
|
209 |
-
def imwrite(img, img_path):
|
210 |
-
img = np.squeeze(img)
|
211 |
-
if img.ndim == 3:
|
212 |
-
img = img[:, :, [2, 1, 0]]
|
213 |
-
cv2.imwrite(img_path, img)
|
214 |
-
|
215 |
-
|
216 |
-
|
217 |
-
# --------------------------------------------
|
218 |
-
# get single image of size HxWxn_channles (BGR)
|
219 |
-
# --------------------------------------------
|
220 |
-
def read_img(path):
|
221 |
-
# read image by cv2
|
222 |
-
# return: Numpy float32, HWC, BGR, [0,1]
|
223 |
-
img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE
|
224 |
-
img = img.astype(np.float32) / 255.
|
225 |
-
if img.ndim == 2:
|
226 |
-
img = np.expand_dims(img, axis=2)
|
227 |
-
# some images have 4 channels
|
228 |
-
if img.shape[2] > 3:
|
229 |
-
img = img[:, :, :3]
|
230 |
-
return img
|
231 |
-
|
232 |
-
|
233 |
-
'''
|
234 |
-
# --------------------------------------------
|
235 |
-
# image format conversion
|
236 |
-
# --------------------------------------------
|
237 |
-
# numpy(single) <---> numpy(unit)
|
238 |
-
# numpy(single) <---> tensor
|
239 |
-
# numpy(unit) <---> tensor
|
240 |
-
# --------------------------------------------
|
241 |
-
'''
|
242 |
-
|
243 |
-
|
244 |
-
# --------------------------------------------
|
245 |
-
# numpy(single) [0, 1] <---> numpy(unit)
|
246 |
-
# --------------------------------------------
|
247 |
-
|
248 |
-
|
249 |
-
def uint2single(img):
|
250 |
-
|
251 |
-
return np.float32(img/255.)
|
252 |
-
|
253 |
-
|
254 |
-
def single2uint(img):
|
255 |
-
|
256 |
-
return np.uint8((img.clip(0, 1)*255.).round())
|
257 |
-
|
258 |
-
|
259 |
-
def uint162single(img):
|
260 |
-
|
261 |
-
return np.float32(img/65535.)
|
262 |
-
|
263 |
-
|
264 |
-
def single2uint16(img):
|
265 |
-
|
266 |
-
return np.uint16((img.clip(0, 1)*65535.).round())
|
267 |
-
|
268 |
-
|
269 |
-
# --------------------------------------------
|
270 |
-
# numpy(unit) (HxWxC or HxW) <---> tensor
|
271 |
-
# --------------------------------------------
|
272 |
-
|
273 |
-
|
274 |
-
# convert uint to 4-dimensional torch tensor
|
275 |
-
def uint2tensor4(img):
|
276 |
-
if img.ndim == 2:
|
277 |
-
img = np.expand_dims(img, axis=2)
|
278 |
-
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
|
279 |
-
|
280 |
-
|
281 |
-
# convert uint to 3-dimensional torch tensor
|
282 |
-
def uint2tensor3(img):
|
283 |
-
if img.ndim == 2:
|
284 |
-
img = np.expand_dims(img, axis=2)
|
285 |
-
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
|
286 |
-
|
287 |
-
|
288 |
-
# convert 2/3/4-dimensional torch tensor to uint
|
289 |
-
def tensor2uint(img):
|
290 |
-
img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
|
291 |
-
if img.ndim == 3:
|
292 |
-
img = np.transpose(img, (1, 2, 0))
|
293 |
-
return np.uint8((img*255.0).round())
|
294 |
-
|
295 |
-
|
296 |
-
# --------------------------------------------
|
297 |
-
# numpy(single) (HxWxC) <---> tensor
|
298 |
-
# --------------------------------------------
|
299 |
-
|
300 |
-
|
301 |
-
# convert single (HxWxC) to 3-dimensional torch tensor
|
302 |
-
def single2tensor3(img):
|
303 |
-
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
|
304 |
-
|
305 |
-
|
306 |
-
# convert single (HxWxC) to 4-dimensional torch tensor
|
307 |
-
def single2tensor4(img):
|
308 |
-
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
|
309 |
-
|
310 |
-
|
311 |
-
# convert torch tensor to single
|
312 |
-
def tensor2single(img):
|
313 |
-
img = img.data.squeeze().float().cpu().numpy()
|
314 |
-
if img.ndim == 3:
|
315 |
-
img = np.transpose(img, (1, 2, 0))
|
316 |
-
|
317 |
-
return img
|
318 |
-
|
319 |
-
# convert torch tensor to single
|
320 |
-
def tensor2single3(img):
|
321 |
-
img = img.data.squeeze().float().cpu().numpy()
|
322 |
-
if img.ndim == 3:
|
323 |
-
img = np.transpose(img, (1, 2, 0))
|
324 |
-
elif img.ndim == 2:
|
325 |
-
img = np.expand_dims(img, axis=2)
|
326 |
-
return img
|
327 |
-
|
328 |
-
|
329 |
-
def single2tensor5(img):
|
330 |
-
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
|
331 |
-
|
332 |
-
|
333 |
-
def single32tensor5(img):
|
334 |
-
return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
|
335 |
-
|
336 |
-
|
337 |
-
def single42tensor4(img):
|
338 |
-
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
|
339 |
-
|
340 |
-
|
341 |
-
# from skimage.io import imread, imsave
|
342 |
-
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
|
343 |
-
'''
|
344 |
-
Converts a torch Tensor into an image Numpy array of BGR channel order
|
345 |
-
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
|
346 |
-
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
|
347 |
-
'''
|
348 |
-
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp
|
349 |
-
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
|
350 |
-
n_dim = tensor.dim()
|
351 |
-
if n_dim == 4:
|
352 |
-
n_img = len(tensor)
|
353 |
-
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
|
354 |
-
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
|
355 |
-
elif n_dim == 3:
|
356 |
-
img_np = tensor.numpy()
|
357 |
-
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
|
358 |
-
elif n_dim == 2:
|
359 |
-
img_np = tensor.numpy()
|
360 |
-
else:
|
361 |
-
raise TypeError(
|
362 |
-
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
|
363 |
-
if out_type == np.uint8:
|
364 |
-
img_np = (img_np * 255.0).round()
|
365 |
-
# Important. Unlike matlab, numpy.unit8() WILL NOT round by default.
|
366 |
-
return img_np.astype(out_type)
|
367 |
-
|
368 |
-
|
369 |
-
'''
|
370 |
-
# --------------------------------------------
|
371 |
-
# Augmentation, flipe and/or rotate
|
372 |
-
# --------------------------------------------
|
373 |
-
# The following two are enough.
|
374 |
-
# (1) augmet_img: numpy image of WxHxC or WxH
|
375 |
-
# (2) augment_img_tensor4: tensor image 1xCxWxH
|
376 |
-
# --------------------------------------------
|
377 |
-
'''
|
378 |
-
|
379 |
-
|
380 |
-
def augment_img(img, mode=0):
|
381 |
-
'''Kai Zhang (github: https://github.com/cszn)
|
382 |
-
'''
|
383 |
-
if mode == 0:
|
384 |
-
return img
|
385 |
-
elif mode == 1:
|
386 |
-
return np.flipud(np.rot90(img))
|
387 |
-
elif mode == 2:
|
388 |
-
return np.flipud(img)
|
389 |
-
elif mode == 3:
|
390 |
-
return np.rot90(img, k=3)
|
391 |
-
elif mode == 4:
|
392 |
-
return np.flipud(np.rot90(img, k=2))
|
393 |
-
elif mode == 5:
|
394 |
-
return np.rot90(img)
|
395 |
-
elif mode == 6:
|
396 |
-
return np.rot90(img, k=2)
|
397 |
-
elif mode == 7:
|
398 |
-
return np.flipud(np.rot90(img, k=3))
|
399 |
-
|
400 |
-
|
401 |
-
def augment_img_tensor4(img, mode=0):
|
402 |
-
'''Kai Zhang (github: https://github.com/cszn)
|
403 |
-
'''
|
404 |
-
if mode == 0:
|
405 |
-
return img
|
406 |
-
elif mode == 1:
|
407 |
-
return img.rot90(1, [2, 3]).flip([2])
|
408 |
-
elif mode == 2:
|
409 |
-
return img.flip([2])
|
410 |
-
elif mode == 3:
|
411 |
-
return img.rot90(3, [2, 3])
|
412 |
-
elif mode == 4:
|
413 |
-
return img.rot90(2, [2, 3]).flip([2])
|
414 |
-
elif mode == 5:
|
415 |
-
return img.rot90(1, [2, 3])
|
416 |
-
elif mode == 6:
|
417 |
-
return img.rot90(2, [2, 3])
|
418 |
-
elif mode == 7:
|
419 |
-
return img.rot90(3, [2, 3]).flip([2])
|
420 |
-
|
421 |
-
|
422 |
-
def augment_img_tensor(img, mode=0):
|
423 |
-
'''Kai Zhang (github: https://github.com/cszn)
|
424 |
-
'''
|
425 |
-
img_size = img.size()
|
426 |
-
img_np = img.data.cpu().numpy()
|
427 |
-
if len(img_size) == 3:
|
428 |
-
img_np = np.transpose(img_np, (1, 2, 0))
|
429 |
-
elif len(img_size) == 4:
|
430 |
-
img_np = np.transpose(img_np, (2, 3, 1, 0))
|
431 |
-
img_np = augment_img(img_np, mode=mode)
|
432 |
-
img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
|
433 |
-
if len(img_size) == 3:
|
434 |
-
img_tensor = img_tensor.permute(2, 0, 1)
|
435 |
-
elif len(img_size) == 4:
|
436 |
-
img_tensor = img_tensor.permute(3, 2, 0, 1)
|
437 |
-
|
438 |
-
return img_tensor.type_as(img)
|
439 |
-
|
440 |
-
|
441 |
-
def augment_img_np3(img, mode=0):
|
442 |
-
if mode == 0:
|
443 |
-
return img
|
444 |
-
elif mode == 1:
|
445 |
-
return img.transpose(1, 0, 2)
|
446 |
-
elif mode == 2:
|
447 |
-
return img[::-1, :, :]
|
448 |
-
elif mode == 3:
|
449 |
-
img = img[::-1, :, :]
|
450 |
-
img = img.transpose(1, 0, 2)
|
451 |
-
return img
|
452 |
-
elif mode == 4:
|
453 |
-
return img[:, ::-1, :]
|
454 |
-
elif mode == 5:
|
455 |
-
img = img[:, ::-1, :]
|
456 |
-
img = img.transpose(1, 0, 2)
|
457 |
-
return img
|
458 |
-
elif mode == 6:
|
459 |
-
img = img[:, ::-1, :]
|
460 |
-
img = img[::-1, :, :]
|
461 |
-
return img
|
462 |
-
elif mode == 7:
|
463 |
-
img = img[:, ::-1, :]
|
464 |
-
img = img[::-1, :, :]
|
465 |
-
img = img.transpose(1, 0, 2)
|
466 |
-
return img
|
467 |
-
|
468 |
-
|
469 |
-
def augment_imgs(img_list, hflip=True, rot=True):
|
470 |
-
# horizontal flip OR rotate
|
471 |
-
hflip = hflip and random.random() < 0.5
|
472 |
-
vflip = rot and random.random() < 0.5
|
473 |
-
rot90 = rot and random.random() < 0.5
|
474 |
-
|
475 |
-
def _augment(img):
|
476 |
-
if hflip:
|
477 |
-
img = img[:, ::-1, :]
|
478 |
-
if vflip:
|
479 |
-
img = img[::-1, :, :]
|
480 |
-
if rot90:
|
481 |
-
img = img.transpose(1, 0, 2)
|
482 |
-
return img
|
483 |
-
|
484 |
-
return [_augment(img) for img in img_list]
|
485 |
-
|
486 |
-
|
487 |
-
'''
|
488 |
-
# --------------------------------------------
|
489 |
-
# modcrop and shave
|
490 |
-
# --------------------------------------------
|
491 |
-
'''
|
492 |
-
|
493 |
-
|
494 |
-
def modcrop(img_in, scale):
|
495 |
-
# img_in: Numpy, HWC or HW
|
496 |
-
img = np.copy(img_in)
|
497 |
-
if img.ndim == 2:
|
498 |
-
H, W = img.shape
|
499 |
-
H_r, W_r = H % scale, W % scale
|
500 |
-
img = img[:H - H_r, :W - W_r]
|
501 |
-
elif img.ndim == 3:
|
502 |
-
H, W, C = img.shape
|
503 |
-
H_r, W_r = H % scale, W % scale
|
504 |
-
img = img[:H - H_r, :W - W_r, :]
|
505 |
-
else:
|
506 |
-
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
|
507 |
-
return img
|
508 |
-
|
509 |
-
|
510 |
-
def shave(img_in, border=0):
|
511 |
-
# img_in: Numpy, HWC or HW
|
512 |
-
img = np.copy(img_in)
|
513 |
-
h, w = img.shape[:2]
|
514 |
-
img = img[border:h-border, border:w-border]
|
515 |
-
return img
|
516 |
-
|
517 |
-
|
518 |
-
'''
|
519 |
-
# --------------------------------------------
|
520 |
-
# image processing process on numpy image
|
521 |
-
# channel_convert(in_c, tar_type, img_list):
|
522 |
-
# rgb2ycbcr(img, only_y=True):
|
523 |
-
# bgr2ycbcr(img, only_y=True):
|
524 |
-
# ycbcr2rgb(img):
|
525 |
-
# --------------------------------------------
|
526 |
-
'''
|
527 |
-
|
528 |
-
|
529 |
-
def rgb2ycbcr(img, only_y=True):
|
530 |
-
'''same as matlab rgb2ycbcr
|
531 |
-
only_y: only return Y channel
|
532 |
-
Input:
|
533 |
-
uint8, [0, 255]
|
534 |
-
float, [0, 1]
|
535 |
-
'''
|
536 |
-
in_img_type = img.dtype
|
537 |
-
img.astype(np.float32)
|
538 |
-
if in_img_type != np.uint8:
|
539 |
-
img *= 255.
|
540 |
-
# convert
|
541 |
-
if only_y:
|
542 |
-
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
|
543 |
-
else:
|
544 |
-
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
|
545 |
-
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
|
546 |
-
if in_img_type == np.uint8:
|
547 |
-
rlt = rlt.round()
|
548 |
-
else:
|
549 |
-
rlt /= 255.
|
550 |
-
return rlt.astype(in_img_type)
|
551 |
-
|
552 |
-
|
553 |
-
def ycbcr2rgb(img):
|
554 |
-
'''same as matlab ycbcr2rgb
|
555 |
-
Input:
|
556 |
-
uint8, [0, 255]
|
557 |
-
float, [0, 1]
|
558 |
-
'''
|
559 |
-
in_img_type = img.dtype
|
560 |
-
img.astype(np.float32)
|
561 |
-
if in_img_type != np.uint8:
|
562 |
-
img *= 255.
|
563 |
-
# convert
|
564 |
-
rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
|
565 |
-
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
|
566 |
-
if in_img_type == np.uint8:
|
567 |
-
rlt = rlt.round()
|
568 |
-
else:
|
569 |
-
rlt /= 255.
|
570 |
-
return rlt.astype(in_img_type)
|
571 |
-
|
572 |
-
|
573 |
-
def bgr2ycbcr(img, only_y=True):
|
574 |
-
'''bgr version of rgb2ycbcr
|
575 |
-
only_y: only return Y channel
|
576 |
-
Input:
|
577 |
-
uint8, [0, 255]
|
578 |
-
float, [0, 1]
|
579 |
-
'''
|
580 |
-
in_img_type = img.dtype
|
581 |
-
img.astype(np.float32)
|
582 |
-
if in_img_type != np.uint8:
|
583 |
-
img *= 255.
|
584 |
-
# convert
|
585 |
-
if only_y:
|
586 |
-
rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
|
587 |
-
else:
|
588 |
-
rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
|
589 |
-
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
|
590 |
-
if in_img_type == np.uint8:
|
591 |
-
rlt = rlt.round()
|
592 |
-
else:
|
593 |
-
rlt /= 255.
|
594 |
-
return rlt.astype(in_img_type)
|
595 |
-
|
596 |
-
|
597 |
-
def channel_convert(in_c, tar_type, img_list):
|
598 |
-
# conversion among BGR, gray and y
|
599 |
-
if in_c == 3 and tar_type == 'gray': # BGR to gray
|
600 |
-
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
|
601 |
-
return [np.expand_dims(img, axis=2) for img in gray_list]
|
602 |
-
elif in_c == 3 and tar_type == 'y': # BGR to y
|
603 |
-
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
|
604 |
-
return [np.expand_dims(img, axis=2) for img in y_list]
|
605 |
-
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
|
606 |
-
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
|
607 |
-
else:
|
608 |
-
return img_list
|
609 |
-
|
610 |
-
|
611 |
-
'''
|
612 |
-
# --------------------------------------------
|
613 |
-
# metric, PSNR and SSIM
|
614 |
-
# --------------------------------------------
|
615 |
-
'''
|
616 |
-
|
617 |
-
|
618 |
-
# --------------------------------------------
|
619 |
-
# PSNR
|
620 |
-
# --------------------------------------------
|
621 |
-
def calculate_psnr(img1, img2, border=0):
|
622 |
-
# img1 and img2 have range [0, 255]
|
623 |
-
#img1 = img1.squeeze()
|
624 |
-
#img2 = img2.squeeze()
|
625 |
-
if not img1.shape == img2.shape:
|
626 |
-
raise ValueError('Input images must have the same dimensions.')
|
627 |
-
h, w = img1.shape[:2]
|
628 |
-
img1 = img1[border:h-border, border:w-border]
|
629 |
-
img2 = img2[border:h-border, border:w-border]
|
630 |
-
|
631 |
-
img1 = img1.astype(np.float64)
|
632 |
-
img2 = img2.astype(np.float64)
|
633 |
-
mse = np.mean((img1 - img2)**2)
|
634 |
-
if mse == 0:
|
635 |
-
return float('inf')
|
636 |
-
return 20 * math.log10(255.0 / math.sqrt(mse))
|
637 |
-
|
638 |
-
|
639 |
-
# --------------------------------------------
|
640 |
-
# SSIM
|
641 |
-
# --------------------------------------------
|
642 |
-
def calculate_ssim(img1, img2, border=0):
|
643 |
-
'''calculate SSIM
|
644 |
-
the same outputs as MATLAB's
|
645 |
-
img1, img2: [0, 255]
|
646 |
-
'''
|
647 |
-
#img1 = img1.squeeze()
|
648 |
-
#img2 = img2.squeeze()
|
649 |
-
if not img1.shape == img2.shape:
|
650 |
-
raise ValueError('Input images must have the same dimensions.')
|
651 |
-
h, w = img1.shape[:2]
|
652 |
-
img1 = img1[border:h-border, border:w-border]
|
653 |
-
img2 = img2[border:h-border, border:w-border]
|
654 |
-
|
655 |
-
if img1.ndim == 2:
|
656 |
-
return ssim(img1, img2)
|
657 |
-
elif img1.ndim == 3:
|
658 |
-
if img1.shape[2] == 3:
|
659 |
-
ssims = []
|
660 |
-
for i in range(3):
|
661 |
-
ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
|
662 |
-
return np.array(ssims).mean()
|
663 |
-
elif img1.shape[2] == 1:
|
664 |
-
return ssim(np.squeeze(img1), np.squeeze(img2))
|
665 |
-
else:
|
666 |
-
raise ValueError('Wrong input image dimensions.')
|
667 |
-
|
668 |
-
|
669 |
-
def ssim(img1, img2):
|
670 |
-
C1 = (0.01 * 255)**2
|
671 |
-
C2 = (0.03 * 255)**2
|
672 |
-
|
673 |
-
img1 = img1.astype(np.float64)
|
674 |
-
img2 = img2.astype(np.float64)
|
675 |
-
kernel = cv2.getGaussianKernel(11, 1.5)
|
676 |
-
window = np.outer(kernel, kernel.transpose())
|
677 |
-
|
678 |
-
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
|
679 |
-
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
|
680 |
-
mu1_sq = mu1**2
|
681 |
-
mu2_sq = mu2**2
|
682 |
-
mu1_mu2 = mu1 * mu2
|
683 |
-
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
|
684 |
-
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
|
685 |
-
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
|
686 |
-
|
687 |
-
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
|
688 |
-
(sigma1_sq + sigma2_sq + C2))
|
689 |
-
return ssim_map.mean()
|
690 |
-
|
691 |
-
|
692 |
-
'''
|
693 |
-
# --------------------------------------------
|
694 |
-
# matlab's bicubic imresize (numpy and torch) [0, 1]
|
695 |
-
# --------------------------------------------
|
696 |
-
'''
|
697 |
-
|
698 |
-
|
699 |
-
# matlab 'imresize' function, now only support 'bicubic'
|
700 |
-
def cubic(x):
|
701 |
-
absx = torch.abs(x)
|
702 |
-
absx2 = absx**2
|
703 |
-
absx3 = absx**3
|
704 |
-
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
|
705 |
-
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
|
706 |
-
|
707 |
-
|
708 |
-
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
|
709 |
-
if (scale < 1) and (antialiasing):
|
710 |
-
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
|
711 |
-
kernel_width = kernel_width / scale
|
712 |
-
|
713 |
-
# Output-space coordinates
|
714 |
-
x = torch.linspace(1, out_length, out_length)
|
715 |
-
|
716 |
-
# Input-space coordinates. Calculate the inverse mapping such that 0.5
|
717 |
-
# in output space maps to 0.5 in input space, and 0.5+scale in output
|
718 |
-
# space maps to 1.5 in input space.
|
719 |
-
u = x / scale + 0.5 * (1 - 1 / scale)
|
720 |
-
|
721 |
-
# What is the left-most pixel that can be involved in the computation?
|
722 |
-
left = torch.floor(u - kernel_width / 2)
|
723 |
-
|
724 |
-
# What is the maximum number of pixels that can be involved in the
|
725 |
-
# computation? Note: it's OK to use an extra pixel here; if the
|
726 |
-
# corresponding weights are all zero, it will be eliminated at the end
|
727 |
-
# of this function.
|
728 |
-
P = math.ceil(kernel_width) + 2
|
729 |
-
|
730 |
-
# The indices of the input pixels involved in computing the k-th output
|
731 |
-
# pixel are in row k of the indices matrix.
|
732 |
-
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
|
733 |
-
1, P).expand(out_length, P)
|
734 |
-
|
735 |
-
# The weights used to compute the k-th output pixel are in row k of the
|
736 |
-
# weights matrix.
|
737 |
-
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
|
738 |
-
# apply cubic kernel
|
739 |
-
if (scale < 1) and (antialiasing):
|
740 |
-
weights = scale * cubic(distance_to_center * scale)
|
741 |
-
else:
|
742 |
-
weights = cubic(distance_to_center)
|
743 |
-
# Normalize the weights matrix so that each row sums to 1.
|
744 |
-
weights_sum = torch.sum(weights, 1).view(out_length, 1)
|
745 |
-
weights = weights / weights_sum.expand(out_length, P)
|
746 |
-
|
747 |
-
# If a column in weights is all zero, get rid of it. only consider the first and last column.
|
748 |
-
weights_zero_tmp = torch.sum((weights == 0), 0)
|
749 |
-
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
|
750 |
-
indices = indices.narrow(1, 1, P - 2)
|
751 |
-
weights = weights.narrow(1, 1, P - 2)
|
752 |
-
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
|
753 |
-
indices = indices.narrow(1, 0, P - 2)
|
754 |
-
weights = weights.narrow(1, 0, P - 2)
|
755 |
-
weights = weights.contiguous()
|
756 |
-
indices = indices.contiguous()
|
757 |
-
sym_len_s = -indices.min() + 1
|
758 |
-
sym_len_e = indices.max() - in_length
|
759 |
-
indices = indices + sym_len_s - 1
|
760 |
-
return weights, indices, int(sym_len_s), int(sym_len_e)
|
761 |
-
|
762 |
-
|
763 |
-
# --------------------------------------------
|
764 |
-
# imresize for tensor image [0, 1]
|
765 |
-
# --------------------------------------------
|
766 |
-
def imresize(img, scale, antialiasing=True):
|
767 |
-
# Now the scale should be the same for H and W
|
768 |
-
# input: img: pytorch tensor, CHW or HW [0,1]
|
769 |
-
# output: CHW or HW [0,1] w/o round
|
770 |
-
need_squeeze = True if img.dim() == 2 else False
|
771 |
-
if need_squeeze:
|
772 |
-
img.unsqueeze_(0)
|
773 |
-
in_C, in_H, in_W = img.size()
|
774 |
-
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
|
775 |
-
kernel_width = 4
|
776 |
-
kernel = 'cubic'
|
777 |
-
|
778 |
-
# Return the desired dimension order for performing the resize. The
|
779 |
-
# strategy is to perform the resize first along the dimension with the
|
780 |
-
# smallest scale factor.
|
781 |
-
# Now we do not support this.
|
782 |
-
|
783 |
-
# get weights and indices
|
784 |
-
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
|
785 |
-
in_H, out_H, scale, kernel, kernel_width, antialiasing)
|
786 |
-
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
|
787 |
-
in_W, out_W, scale, kernel, kernel_width, antialiasing)
|
788 |
-
# process H dimension
|
789 |
-
# symmetric copying
|
790 |
-
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
|
791 |
-
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
|
792 |
-
|
793 |
-
sym_patch = img[:, :sym_len_Hs, :]
|
794 |
-
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
|
795 |
-
sym_patch_inv = sym_patch.index_select(1, inv_idx)
|
796 |
-
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
|
797 |
-
|
798 |
-
sym_patch = img[:, -sym_len_He:, :]
|
799 |
-
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
|
800 |
-
sym_patch_inv = sym_patch.index_select(1, inv_idx)
|
801 |
-
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
|
802 |
-
|
803 |
-
out_1 = torch.FloatTensor(in_C, out_H, in_W)
|
804 |
-
kernel_width = weights_H.size(1)
|
805 |
-
for i in range(out_H):
|
806 |
-
idx = int(indices_H[i][0])
|
807 |
-
for j in range(out_C):
|
808 |
-
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
|
809 |
-
|
810 |
-
# process W dimension
|
811 |
-
# symmetric copying
|
812 |
-
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
|
813 |
-
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
|
814 |
-
|
815 |
-
sym_patch = out_1[:, :, :sym_len_Ws]
|
816 |
-
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
|
817 |
-
sym_patch_inv = sym_patch.index_select(2, inv_idx)
|
818 |
-
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
|
819 |
-
|
820 |
-
sym_patch = out_1[:, :, -sym_len_We:]
|
821 |
-
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
|
822 |
-
sym_patch_inv = sym_patch.index_select(2, inv_idx)
|
823 |
-
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
|
824 |
-
|
825 |
-
out_2 = torch.FloatTensor(in_C, out_H, out_W)
|
826 |
-
kernel_width = weights_W.size(1)
|
827 |
-
for i in range(out_W):
|
828 |
-
idx = int(indices_W[i][0])
|
829 |
-
for j in range(out_C):
|
830 |
-
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
|
831 |
-
if need_squeeze:
|
832 |
-
out_2.squeeze_()
|
833 |
-
return out_2
|
834 |
-
|
835 |
-
|
836 |
-
# --------------------------------------------
|
837 |
-
# imresize for numpy image [0, 1]
|
838 |
-
# --------------------------------------------
|
839 |
-
def imresize_np(img, scale, antialiasing=True):
|
840 |
-
# Now the scale should be the same for H and W
|
841 |
-
# input: img: Numpy, HWC or HW [0,1]
|
842 |
-
# output: HWC or HW [0,1] w/o round
|
843 |
-
img = torch.from_numpy(img)
|
844 |
-
need_squeeze = True if img.dim() == 2 else False
|
845 |
-
if need_squeeze:
|
846 |
-
img.unsqueeze_(2)
|
847 |
-
|
848 |
-
in_H, in_W, in_C = img.size()
|
849 |
-
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
|
850 |
-
kernel_width = 4
|
851 |
-
kernel = 'cubic'
|
852 |
-
|
853 |
-
# Return the desired dimension order for performing the resize. The
|
854 |
-
# strategy is to perform the resize first along the dimension with the
|
855 |
-
# smallest scale factor.
|
856 |
-
# Now we do not support this.
|
857 |
-
|
858 |
-
# get weights and indices
|
859 |
-
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
|
860 |
-
in_H, out_H, scale, kernel, kernel_width, antialiasing)
|
861 |
-
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
|
862 |
-
in_W, out_W, scale, kernel, kernel_width, antialiasing)
|
863 |
-
# process H dimension
|
864 |
-
# symmetric copying
|
865 |
-
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
|
866 |
-
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
|
867 |
-
|
868 |
-
sym_patch = img[:sym_len_Hs, :, :]
|
869 |
-
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
|
870 |
-
sym_patch_inv = sym_patch.index_select(0, inv_idx)
|
871 |
-
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
|
872 |
-
|
873 |
-
sym_patch = img[-sym_len_He:, :, :]
|
874 |
-
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
|
875 |
-
sym_patch_inv = sym_patch.index_select(0, inv_idx)
|
876 |
-
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
|
877 |
-
|
878 |
-
out_1 = torch.FloatTensor(out_H, in_W, in_C)
|
879 |
-
kernel_width = weights_H.size(1)
|
880 |
-
for i in range(out_H):
|
881 |
-
idx = int(indices_H[i][0])
|
882 |
-
for j in range(out_C):
|
883 |
-
out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
|
884 |
-
|
885 |
-
# process W dimension
|
886 |
-
# symmetric copying
|
887 |
-
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
|
888 |
-
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
|
889 |
-
|
890 |
-
sym_patch = out_1[:, :sym_len_Ws, :]
|
891 |
-
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
|
892 |
-
sym_patch_inv = sym_patch.index_select(1, inv_idx)
|
893 |
-
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
|
894 |
-
|
895 |
-
sym_patch = out_1[:, -sym_len_We:, :]
|
896 |
-
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
|
897 |
-
sym_patch_inv = sym_patch.index_select(1, inv_idx)
|
898 |
-
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
|
899 |
-
|
900 |
-
out_2 = torch.FloatTensor(out_H, out_W, in_C)
|
901 |
-
kernel_width = weights_W.size(1)
|
902 |
-
for i in range(out_W):
|
903 |
-
idx = int(indices_W[i][0])
|
904 |
-
for j in range(out_C):
|
905 |
-
out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
|
906 |
-
if need_squeeze:
|
907 |
-
out_2.squeeze_()
|
908 |
-
|
909 |
-
return out_2.numpy()
|
910 |
-
|
911 |
-
|
912 |
-
if __name__ == '__main__':
|
913 |
-
print('---')
|
914 |
-
# img = imread_uint('test.bmp', 3)
|
915 |
-
# img = uint2single(img)
|
916 |
-
# img_bicubic = imresize_np(img, 1/4)
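The uint8-to-tensor round trip above is the piece most callers of this module rely on. A minimal usage sketch follows; the import name `utils_image` and the file `test.png` are assumptions for illustration only and are not part of this commit:

```python
# Hypothetical usage of the conversion and metric helpers defined above.
import utils_image as util  # assumed import name for the deleted module

img = util.imread_uint('test.png', n_channels=3)   # HxWx3 uint8, RGB
x = util.uint2tensor4(img)                         # 1xCxHxW float tensor in [0, 1]
restored = util.tensor2uint(x)                     # back to HxWx3 uint8
print(util.calculate_psnr(img, restored))          # identical images -> inf
print(util.imresize_np(util.uint2single(img), 1 / 2).shape)  # bicubic half-size, HxWxC float
```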
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/phi/m_bg.wasm.d.ts
DELETED
@@ -1,14 +0,0 @@
/* tslint:disable */
/* eslint-disable */
export const memory: WebAssembly.Memory;
export function __wbg_model_free(a: number): void;
export function model_load(a: number, b: number, c: number, d: number, e: number, f: number): void;
export function model_init_with_prompt(a: number, b: number, c: number, d: number, e: number, f: number, g: number, h: number, i: number): void;
export function model_next_token(a: number, b: number): void;
export function main(a: number, b: number): number;
export function __wbindgen_add_to_stack_pointer(a: number): number;
export function __wbindgen_malloc(a: number, b: number): number;
export function __wbindgen_realloc(a: number, b: number, c: number, d: number): number;
export function __wbindgen_free(a: number, b: number, c: number): void;
export function __wbindgen_exn_store(a: number): void;
export function __wbindgen_start(): void;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/pie/Pie.js
DELETED
@@ -1,68 +0,0 @@
import Base from '../base/Base.js';
import { Arc } from '../utils/Geoms.js';

const Linear = Phaser.Math.Linear;

class Pie extends Base {
    constructor(scene, config) {
        super(scene, config);
        this.type = 'rexSpinnerPie';
    }

    buildShapes() {
        for (var i = 0; i < 4; i++) {
            var pie = (new Arc()).setPie();
            this.addShape(pie);

            pie.setData('speed', Linear(180, 360, Math.random()));
        }
    }

    updateShapes() {
        var centerX = this.centerX;
        var centerY = this.centerY;
        var radius = this.radius;

        var deltaValue;
        if (this.prevValue !== undefined) {
            deltaValue = this.value - this.prevValue;
            if (this.prevValue > this.value) {
                deltaValue += 1;
            }
        }

        var shapes = this.getShapes();
        for (var i = 0, cnt = shapes.length; i < cnt; i++) {
            var pie = shapes[i];
            var pieAlpha = (i + 1) / cnt;

            if (this.prevValue === undefined) {
                var startAngle = (i / cnt) * 360;
                var endAngle = startAngle + 90;
                pie
                    .fillStyle(this.color, pieAlpha)
                    .setRadius(radius)
                    .setCenterPosition(centerX, centerY)
                    .setAngle(startAngle, endAngle)
                    .setData('angle', startAngle);
            } else {
                var startAngle = pie.getData('angle') + pie.getData('speed') * deltaValue;
                startAngle = startAngle % 360;
                var endAngle = startAngle + 90;
                pie
                    .fillStyle(this.color, pieAlpha)
                    .setRadius(radius)
                    .setCenterPosition(centerX, centerY)
                    .setAngle(startAngle, endAngle)
                    .setData('angle', startAngle);
            }
        }

        this.prevValue = this.value;
    }
}

export default Pie;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinputbase/methods/CreateSwatch.js
DELETED
@@ -1,16 +0,0 @@
import RoundRectangle from '../../../roundrectangle/RoundRectangle.js';
import IsGameObject from '../../../../../plugins/utils/system/IsGameObject.js';

var CreateSwatch = function (scene, config) {
    if (config === false) {
        return null;
    } else if (IsGameObject(config)) {
        return config;
    }

    var swatch = new RoundRectangle(scene, config);
    scene.add.existing(swatch);
    return swatch;
}

export default CreateSwatch;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetMaxChildWidth.js
DELETED
@@ -1,18 +0,0 @@
var GetMaxChildWidth = function (children) {
    if (children === undefined) {
        children = this.sizerChildren;
    }
    var result = 0;
    var child, childWidth;
    for (var i = 0, cnt = children.length; i < cnt; i++) {
        child = children[i];
        if (child === '\n') {
            continue;
        }

        childWidth = this.getChildWidth(child);
        result = Math.max(childWidth, result);
    }
    return result;
}
export default GetMaxChildWidth;
spaces/Aiusernumber5/janitorai/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: Janitorai
emoji: 📊
colorFrom: red
colorTo: yellow
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/text/__init__.py
DELETED
@@ -1,60 +0,0 @@
""" from https://github.com/keithito/tacotron """
from text import cleaners
from text.symbols import symbols


# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}


def text_to_sequence(text, symbols, cleaner_names):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    Args:
        text: string to convert to a sequence
        cleaner_names: names of the cleaner functions to run the text through
    Returns:
        List of integers corresponding to the symbols in the text
    '''
    sequence = []
    symbol_to_id = {s: i for i, s in enumerate(symbols)}
    clean_text = _clean_text(text, cleaner_names)
    print(clean_text)
    print(f" length:{len(clean_text)}")
    for symbol in clean_text:
        if symbol not in symbol_to_id.keys():
            continue
        symbol_id = symbol_to_id[symbol]
        sequence += [symbol_id]
    print(f" length:{len(sequence)}")
    return sequence


def cleaned_text_to_sequence(cleaned_text, symbols):
    '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
    Args:
        text: string to convert to a sequence
    Returns:
        List of integers corresponding to the symbols in the text
    '''
    symbol_to_id = {s: i for i, s in enumerate(symbols)}
    sequence = [symbol_to_id[symbol] for symbol in cleaned_text if symbol in symbol_to_id.keys()]
    return sequence


def sequence_to_text(sequence):
    '''Converts a sequence of IDs back to a string'''
    result = ''
    for symbol_id in sequence:
        s = _id_to_symbol[symbol_id]
        result += s
    return result


def _clean_text(text, cleaner_names):
    for name in cleaner_names:
        cleaner = getattr(cleaners, name)
        if not cleaner:
            raise Exception('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text
spaces/AlignmentResearch/tuned-lens/app.py
DELETED
@@ -1,117 +0,0 @@
import torch
from tuned_lens.nn.lenses import TunedLens, LogitLens
from transformers import AutoModelForCausalLM, AutoTokenizer
from tuned_lens.plotting import PredictionTrajectory
import gradio as gr
from plotly import graph_objects as go

device = torch.device("cpu")
print(f"Using device {device} for inference")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
model = model.to(device)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
tuned_lens = TunedLens.from_model_and_pretrained(
    model=model,
    map_location=device,
)
logit_lens = LogitLens.from_model(model)

lens_options_dict = {
    "Tuned Lens": tuned_lens,
    "Logit Lens": logit_lens,
}

statistic_options_dict = {
    "Entropy": "entropy",
    "Cross Entropy": "cross_entropy",
    "Forward KL": "forward_kl",
}


def make_plot(lens, text, statistic, token_cutoff):
    input_ids = tokenizer.encode(text)
    input_ids = [tokenizer.bos_token_id] + input_ids
    targets = input_ids[1:] + [tokenizer.eos_token_id]

    if len(input_ids) == 1:
        return go.Figure(layout=dict(title="Please enter some text."))

    if token_cutoff < 1:
        return go.Figure(layout=dict(title="Please provide valid token cut off."))

    start_pos = max(len(input_ids) - token_cutoff, 0)
    pred_traj = PredictionTrajectory.from_lens_and_model(
        lens=lens_options_dict[lens],
        model=model,
        input_ids=input_ids,
        tokenizer=tokenizer,
        targets=targets,
        start_pos=start_pos,
    )

    return getattr(pred_traj, statistic_options_dict[statistic])().figure(
        title=f"{lens} ({model.name_or_path}) {statistic}",
    )

preamble = """
# The Tuned Lens 🔎

A tuned lens allows us to peak at the iterative computations a transformer uses to compute the next token.

A lens into a transformer with n layers allows you to replace the last $m$ layers of the model with an [affine transformation](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) (we call these affine translators).

This essentially skips over these last few layers and lets you see the best prediction that can be made from the model's representations, i.e. the residual stream, at layer $n - m$. Since the representations may be rotated, shifted, or stretched from layer to layer it's useful to train the len's affine adapters specifically on each layer. This training is what differentiates this method from simpler approaches that decode the residual stream of the network directly using the unembeding layer i.e. the logit lens. We explain this process in [the paper](https://arxiv.org/abs/2303.08112).

## Usage
Since the tuned lens produces a distribution of predictions to visualize it's output we need to we need to provide a summary statistic to plot. The default is simply [entropy](https://en.wikipedia.org/wiki/Entropy_(information_theory)), but you can also choose the [cross entropy](https://en.wikipedia.org/wiki/Cross_entropy) with the target token, or the [KL divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the model's predictions and the tuned lens' predictions. You can also hover over a token to see more of the distribution i.e. the top 10 most probable tokens and their probabilities.

## Examples
Here are some interesting examples you can try.

### Copy paste:
```
Copy: A!2j!#u&NGApS&MkkHe8Gm!#
Paste: A!2j!#u&NGApS&MkkHe8Gm!#
```

### Trivial in-context learning
```
inc 1 2
inc 4 5
inc 13
```

#### Addition
```
add 1 1 2
add 3 4 7
add 13 2
```
"""

with gr.Blocks() as demo:
    gr.Markdown(preamble)
    with gr.Column():
        text = gr.Textbox(
            value="it was the best of times, it was the worst of times",
            label="Input Text",
        )
        with gr.Row():
            lens_options = gr.Dropdown(
                list(lens_options_dict.keys()), value="Tuned Lens", label="Select Lens"
            )
            statistic = gr.Dropdown(
                list(statistic_options_dict.keys()),
                value="Entropy",
                label="Select Statistic",
            )
            token_cutoff = gr.Slider(
                maximum=20, minimum=2, value=10, step=1, label="Plot Last N Tokens"
            )
        examine_btn = gr.Button(value="Submit")
        plot = gr.Plot()
    examine_btn.click(make_plot, [lens_options, text, statistic, token_cutoff], plot)
    demo.load(make_plot, [lens_options, text, statistic, token_cutoff], plot)

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
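For readers who want the same prediction-trajectory plot without the Gradio wrapper, here is a sketch that reuses only the calls already present in the deleted script above; the prompt string is a placeholder, and displaying the result with plotly's `Figure.show()` rather than `gr.Plot` is an assumption:

```python
# Standalone sketch built from the same API calls used in the deleted app.py.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from tuned_lens.nn.lenses import TunedLens
from tuned_lens.plotting import PredictionTrajectory

device = torch.device("cpu")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped").to(device)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
lens = TunedLens.from_model_and_pretrained(model=model, map_location=device)

input_ids = [tokenizer.bos_token_id] + tokenizer.encode("it was the best of times")  # placeholder prompt
targets = input_ids[1:] + [tokenizer.eos_token_id]
traj = PredictionTrajectory.from_lens_and_model(
    lens=lens, model=model, input_ids=input_ids, tokenizer=tokenizer,
    targets=targets, start_pos=0,
)
traj.entropy().figure(title="Tuned Lens entropy").show()
```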
spaces/Aloento/9Nine-PITS/text/frontend/zh_frontend.py
DELETED
@@ -1,287 +0,0 @@
|
|
1 |
-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
import re
|
15 |
-
from typing import List
|
16 |
-
|
17 |
-
import jieba.posseg as psg
|
18 |
-
import numpy as np
|
19 |
-
from g2pM import G2pM
|
20 |
-
from pypinyin import Style
|
21 |
-
from pypinyin import lazy_pinyin
|
22 |
-
from pypinyin import load_single_dict
|
23 |
-
from pypinyin_dict.phrase_pinyin_data import large_pinyin
|
24 |
-
|
25 |
-
from text.frontend.generate_lexicon import generate_lexicon
|
26 |
-
from text.frontend.tone_sandhi import ToneSandhi
|
27 |
-
from text.frontend.zh_normalization.text_normlization import TextNormalizer
|
28 |
-
|
29 |
-
|
30 |
-
class Frontend():
|
31 |
-
def __init__(self,
|
32 |
-
g2p_model="pypinyin",
|
33 |
-
phone_vocab_path=None,
|
34 |
-
tone_vocab_path=None):
|
35 |
-
self.tone_modifier = ToneSandhi()
|
36 |
-
self.text_normalizer = TextNormalizer()
|
37 |
-
|
38 |
-
self.punc = ['!', '?', '…', ",", ".", "#", '-', "%", "$"]
|
39 |
-
# g2p_model can be pypinyin and g2pM
|
40 |
-
self.g2p_model = g2p_model
|
41 |
-
self.add_word_sep = True
|
42 |
-
if self.g2p_model == "g2pM":
|
43 |
-
self.g2pM_model = G2pM()
|
44 |
-
self.pinyin2phone = generate_lexicon(
|
45 |
-
with_tone=True, with_erhua=False)
|
46 |
-
else:
|
47 |
-
|
48 |
-
self.__init__pypinyin()
|
49 |
-
self.must_erhua = {"小院儿", "胡同儿", "范儿", "老汉儿", "撒欢儿", "寻老礼儿", "妥妥儿"}
|
50 |
-
self.not_erhua = {
|
51 |
-
"虐儿", "为儿", "护儿", "瞒儿", "救儿", "替儿", "有儿", "一儿", "我儿", "俺儿", "妻儿",
|
52 |
-
"拐儿", "聋儿", "乞儿", "患儿", "幼儿", "孤儿", "婴儿", "婴幼儿", "连体儿", "脑瘫儿",
|
53 |
-
"流浪儿", "体弱儿", "混血儿", "蜜雪儿", "舫儿", "祖儿", "美儿", "应采儿", "可儿", "侄儿",
|
54 |
-
"孙儿", "侄孙儿", "女儿", "男儿", "红孩儿", "花儿", "虫儿", "马儿", "鸟儿", "猪儿", "猫儿",
|
55 |
-
"狗儿"
|
56 |
-
}
|
57 |
-
self.vocab_phones = {}
|
58 |
-
self.vocab_tones = {}
|
59 |
-
if phone_vocab_path:
|
60 |
-
with open(phone_vocab_path, 'rt') as f:
|
61 |
-
phn_id = [line.strip().split() for line in f.readlines()]
|
62 |
-
for phn, id in phn_id:
|
63 |
-
self.vocab_phones[phn] = int(id)
|
64 |
-
if tone_vocab_path:
|
65 |
-
with open(tone_vocab_path, 'rt') as f:
|
66 |
-
tone_id = [line.strip().split() for line in f.readlines()]
|
67 |
-
for tone, id in tone_id:
|
68 |
-
self.vocab_tones[tone] = int(id)
|
69 |
-
print("initialized zh frontend")
|
70 |
-
|
71 |
-
def __init__pypinyin(self):
|
72 |
-
large_pinyin.load()
|
73 |
-
#
|
74 |
-
# load_phrases_dict({u'开户行': [[u'ka1i'], [u'hu4'], [u'hang2']]})
|
75 |
-
# load_phrases_dict({u'发卡行': [[u'fa4'], [u'ka3'], [u'hang2']]})
|
76 |
-
# load_phrases_dict({u'放款行': [[u'fa4ng'], [u'kua3n'], [u'hang2']]})
|
77 |
-
# load_phrases_dict({u'茧行': [[u'jia3n'], [u'hang2']]})
|
78 |
-
# load_phrases_dict({u'行号': [[u'hang2'], [u'ha4o']]})
|
79 |
-
# load_phrases_dict({u'各地': [[u'ge4'], [u'di4']]})
|
80 |
-
# load_phrases_dict({u'借还款': [[u'jie4'], [u'hua2n'], [u'kua3n']]})
|
81 |
-
# load_phrases_dict({u'时间为': [[u'shi2'], [u'jia1n'], [u'we2i']]})
|
82 |
-
# load_phrases_dict({u'为准': [[u'we2i'], [u'zhu3n']]})
|
83 |
-
# load_phrases_dict({u'色差': [[u'se4'], [u'cha1']]})
|
84 |
-
|
85 |
-
# 调整字的拼音顺序
|
86 |
-
load_single_dict({ord(u'地'): u'de,di4'})
|
87 |
-
|
88 |
-
def _get_initials_finals(self, word: str) -> List[List[str]]:
|
89 |
-
initials = []
|
90 |
-
finals = []
|
91 |
-
if self.g2p_model == "pypinyin":
|
92 |
-
orig_initials = lazy_pinyin(
|
93 |
-
word, neutral_tone_with_five=True, style=Style.INITIALS)
|
94 |
-
orig_finals = lazy_pinyin(
|
95 |
-
word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
|
96 |
-
for c, v in zip(orig_initials, orig_finals):
|
97 |
-
if re.match(r'i\d', v):
|
98 |
-
if c in ['z', 'c', 's']:
|
99 |
-
v = re.sub('i', 'ii', v)
|
100 |
-
elif c in ['zh', 'ch', 'sh', 'r']:
|
101 |
-
v = re.sub('i', 'iii', v)
|
102 |
-
initials.append(c)
|
103 |
-
finals.append(v)
|
104 |
-
elif self.g2p_model == "g2pM":
|
105 |
-
pinyins = self.g2pM_model(word, tone=True, char_split=False)
|
106 |
-
for pinyin in pinyins:
|
107 |
-
pinyin = pinyin.replace("u:", "v")
|
108 |
-
if pinyin in self.pinyin2phone:
|
109 |
-
initial_final_list = self.pinyin2phone[pinyin].split(" ")
|
110 |
-
if len(initial_final_list) == 2:
|
111 |
-
                initials.append(initial_final_list[0])
                finals.append(initial_final_list[1])
            elif len(initial_final_list) == 1:
                initials.append('')
                finals.append(initial_final_list[0])
            else:
                # If it's not pinyin (possibly punctuation) or no conversion is required
                initials.append(pinyin)
                finals.append(pinyin)
        return initials, finals

    # if merge_sentences, merge all sentences into one phone sequence
    def _g2p(self,
             sentences: List[str],
             merge_sentences: bool = True,
             with_erhua: bool = True) -> List[List[str]]:
        segments = sentences
        phones_list = []
        for seg in segments:
            phones = []
            # Remove all English words from the sentence
            seg = re.sub('[a-zA-Z]+', '', seg)
            seg_cut = psg.lcut(seg)
            initials = []
            finals = []
            seg_cut = self.tone_modifier.pre_merge_for_modify(seg_cut)
            for word, pos in seg_cut:
                if self.add_word_sep and word == "#":
                    continue
                if pos == 'eng':
                    continue
                sub_initials, sub_finals = self._get_initials_finals(word)
                sub_finals = self.tone_modifier.modified_tone(word, pos,
                                                              sub_finals)
                if with_erhua:
                    sub_initials, sub_finals = self._merge_erhua(
                        sub_initials, sub_finals, word, pos)
                initials.append(sub_initials)
                finals.append(sub_finals)
                if self.add_word_sep and word not in self.punc:
                    initials.append(["#"])
                    finals.append(["#"])

                # assert len(sub_initials) == len(sub_finals) == len(word)
            initials = sum(initials, [])
            finals = sum(finals, [])

            for c, v in zip(initials, finals):
                # NOTE: post process for pypinyin outputs
                # we discriminate i, ii and iii
                if c:
                    phones.append(c)
                if v and v not in self.punc:
                    phones.append(v)

            phones_list.append(phones)
        if merge_sentences:
            merge_list = sum(phones_list, [])
            # rm the last 'sp' to avoid the noise at the end
            # cause in the training data, no 'sp' in the end
            if merge_list[-1] == 'sp':
                merge_list = merge_list[:-1]
            phones_list = []
            phones_list.append(merge_list)
        return phones_list

    def _merge_erhua(self,
                     initials: List[str],
                     finals: List[str],
                     word: str,
                     pos: str) -> List[List[str]]:
        if word not in self.must_erhua and (word in self.not_erhua or
                                            pos in {"a", "j", "nr"}):
            return initials, finals
        # cases such as "……" are returned directly
        if len(finals) != len(word):
            return initials, finals

        assert len(finals) == len(word)

        new_initials = []
        new_finals = []
        for i, phn in enumerate(finals):
            if i == len(finals) - 1 and word[i] == "儿" and phn in {
                    "er2", "er5"
            } and word[-2:] not in self.not_erhua and new_finals:
                new_finals[-1] = new_finals[-1][:-1] + "r" + new_finals[-1][-1]
            else:
                new_finals.append(phn)
                new_initials.append(initials[i])
        return new_initials, new_finals

    def _p2id(self, phonemes: List[str]) -> np.array:
        # replace unk phone with sp
        phonemes = [
            phn if phn in self.vocab_phones else "sp" for phn in phonemes
        ]
        phone_ids = [self.vocab_phones[item] for item in phonemes]
        return np.array(phone_ids, np.int64)

    def _t2id(self, tones: List[str]) -> np.array:
        # replace unk tone with "0"
        tones = [tone if tone in self.vocab_tones else "0" for tone in tones]
        tone_ids = [self.vocab_tones[item] for item in tones]
        return np.array(tone_ids, np.int64)

    def _get_phone_tone(self, phonemes: List[str],
                        get_tone_ids: bool = False) -> List[List[str]]:
        phones = []
        tones = []
        if get_tone_ids and self.vocab_tones:
            for full_phone in phonemes:
                # split tone from finals
                match = re.match(r'^(\w+)([012345])$', full_phone)
                if match:
                    phone = match.group(1)
                    tone = match.group(2)
                    # if the merged erhua is not in the vocab:
                    # assume the input is ['iaor3'] and 'iaor' is not in self.vocab_phones,
                    # then we split 'iaor' into ['iao', 'er'] and the tones change
                    # from ['3'] to ['3', '2'], where '2' is the tone of 'er2'
                    if len(phone) >= 2 and phone != "er" and phone[
                            -1] == 'r' and phone not in self.vocab_phones and phone[:
                                                                                    -1] in self.vocab_phones:
                        phones.append(phone[:-1])
                        phones.append("er")
                        tones.append(tone)
                        tones.append("2")
                    else:
                        phones.append(phone)
                        tones.append(tone)
                else:
                    phones.append(full_phone)
                    tones.append('0')
        else:
            for phone in phonemes:
                # if the merged erhua is not in the vocab:
                # assume the input is ['iaor3'] and 'iaor' is not in self.vocab_phones,
                # then change ['iaor3'] to ['iao3', 'er2']
                if len(phone) >= 3 and phone[:-1] != "er" and phone[
                        -2] == 'r' and phone not in self.vocab_phones and (
                            phone[:-2] + phone[-1]) in self.vocab_phones:
                    phones.append((phone[:-2] + phone[-1]))
                    phones.append("er2")
                else:
                    phones.append(phone)
        return phones, tones

    def get_phonemes(self,
                     sentence: str,
                     merge_sentences: bool = True,
                     with_erhua: bool = False,
                     robot: bool = False,
                     print_info: bool = False) -> List[List[str]]:
        sentence = sentence.replace("嗯", "恩")
        sentences = self.text_normalizer.normalize(sentence)
        phonemes = self._g2p(
            sentences, merge_sentences=merge_sentences, with_erhua=with_erhua)
        # change all tones to `1`
        if robot:
            new_phonemes = []
            for sentence in phonemes:
                new_sentence = []
                for item in sentence:
                    # `er` only has tone `2`
                    if item[-1] in "12345" and item != "er2":
                        item = item[:-1] + "1"
                    new_sentence.append(item)
                new_phonemes.append(new_sentence)
            phonemes = new_phonemes
        if print_info:
            print("----------------------------")
            print("text norm results:")
            print(sentences)
            print("----------------------------")
            print("g2p results:")
            print(phonemes)
            print("----------------------------")
        return phonemes
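For orientation, `get_phonemes` is the public entry point of the frontend code above: it normalizes the raw sentence, runs `_g2p`, and optionally flattens every tone to `1` in robot mode, while `_p2id`/`_t2id` map the resulting symbols to vocabulary ids. A minimal usage sketch, assuming the enclosing class (defined earlier in zh_frontend.py and not shown in this excerpt) can be instantiated as `frontend` with its phone and tone vocabularies loaded:

    # `Frontend` is a placeholder name for the class these methods belong to; its real
    # constructor arguments come from the earlier part of zh_frontend.py.
    frontend = Frontend()

    # One merged phone sequence per call when merge_sentences=True.
    phones = frontend.get_phonemes("你好,世界。", merge_sentences=True, print_info=True)[0]

    # Look up vocabulary ids; unknown phones fall back to "sp" inside _p2id.
    phone_ids = frontend._p2id(phones)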
spaces/Aloento/9Nine-PITS/text/frontend/zh_normalization/char_convert.py
DELETED
@@ -1,46 +0,0 @@
# coding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Traditional and simplified Chinese conversion, a simplified character may correspond to multiple traditional characters.
"""
simplified_charcters = '制咖片型超声盘鉴定仔点他命书歌粉巾字帐恤手指记忆棒形转弯沟光○〇㐄㐅㐆㐌㐖毒㐜㐡㐤㐰㐺㑇㑳㒳㒸㔾㗂㗎㝵㞎㞙㞞以㢲㢴㤅㥁㥯㨗㫺㬎㮎㮚㮸㲋㲱㲾㳮涧㵪㶸㷖㷭㹢㹴犬㺢狓㺵碗㽮㿝䍃䔢䖟䖸䗈䗥䗪䝓射䥯䦉䯝鲃鱼䲔䳗鹅䵹鼄䶑一对应映射丁不识下儿子做二休世丘之貉并中台原则串为甚谓干净了百事无成八变五十些人得道鸡升天代如并来去个国政策劲幽灵在欧洲游荡接样萝卜坑侧化传价元论醇共再准刀两断切分耕耘收获钱货物向看旧就绪险刻千金动劳永逸匙零夜半卡通回复返影踪反常态口咬气句话同吐快吹周味呼诺呜品红锅哄而散起唱和问三知生熟团漆黑火糟堆场空块面塌糊涂尘染壁厢夔已足多情露水大早到晚夫妻当关万莫开失古恨套所料既往孔见提师要家主审寸阴难买斗牛小撮部阵局展身层巴掌帆风顺席地带过年计于春头载四季期被蛇怕井绳度愿式份弹顷深前律径心意念差愁孤行俱全房厅交遮打技长把抓死拿眼泪鼻涕钥锁折段抿拍即合扫排掬挥拨拥上入击洞掷揽改故辙败文值名斑方面旁族日秋餐隔雅里终父旦时晌会霎间晃暴寒曝更月望垠际朝夕本正经利杯羹东西板枝独秀根筋杆进条龙服务概模次函数又性程总付步脚印趋登毛拔呵氧氮碳决雌雄波未平派谎言流清楚白准溜烟潭有获闻是处降琴鹤甲病发可拾沙目然了直以相眨穿睹瞥瞬矢的解石鸟神教秉虔诚秘种窝蜂穷窍笑置笔苟勾销抹杀煞等奖箍节吃箭仇双雕诗筹箩筐系列纸级士官统丝毫挂维网尽线微吭响股脑胎脉承腔臂力致效资源址器举功投般说讲规贸易叶障着慎满皆输号木电池衣倾钟高低视仁觉醒览遗角银币触溃九鼎蔽抄出驷马追重语破贫洗贯走路安蹴至几蹶振跃役胆汗较辈轮辞赞退六连遍递边针血锤音错门思闪真倒项栽雾类保护川先惊乍体哄鳞爪鸣滴泡邻域党专鼓作齐炒丑烯亥克内酯冬加奴卯肝炎基尺梁街裤镐客宠庭巳汝昌烷玲磊糖肇酉醛啷青县韪良香骨鲷丂七集河市弦喜嘴张舌堵区工业姊妹星架构巧彩扭歪拼凑余热曜武州爷浮屠美乡老阶树荤素碎落能魄鳃鳗珠丄丅丆万俟丈尚摸母娘量管群亚虎必我堂令申件装伏位博侠义界表女墟台戏臭皮匠胜诸葛亮赛顶倍催请运算包立叉戟离疫苗土史志演围揭瓦晒夷姑婆帝村宝烂尖杉碱屉桌山岔岛由纪峡坝库镇废从德后拗汤治旬食明昧曹朋友框栏极权幂曲归依猫民氟硼氯磷铁江侗自旅法司洋浦梅园温暖湾焦班幸用田略番叠皇炮捶硝苯酸腺苷棱草镜穗跳远索锦纲聚氰胺联店胚膲爱色堇紫罗兰芝茶饭菱云虫藏藩乱叛苏亲债凳学座恐恋柱测肌腹衩锥系貂企乌跪叩军车农题迭都甘油屯奏键短阿姨陪姐只顾茅庐槽驾魂鲜鹿页其菜单乘任供势午齿汉组织吊调泻唇坡城报坟外夸将尉建筑岸岗公床扬新剑升杭林栗校楼标款汽社浣海商馆剧院钢华港机械广媒环球融第医科证券综财乐育游涨犹岭疏瘾睑确兵领导缴肢膛船艾瑟尔苍蔡虞效衫覆访诉课谕议轨述野钩限敌鞋颌颔颚饶首龈站例修凡划垂届属崽颏厨拜挫摆放旋削棋榻槛礼沉注滑营狱画确仪聘花葬诏员跌辖周达酒锚闸陷陆雨雪飞威丌于丹久乏予理评产亢卑亦乎舞己悲矩圆词害志但住佞佳便俗信票案幅翁倦伦假偏倚斜亏鬼敲停备伤脾胃仅此像俭匮免宜穴焉戴兼容许冻伯仲负彼昼皂轩轾实刊划颠卫战哥比省非好黄饰别拘束掩奶睬选择摇扰烦苦枚写协厌及格受欢迎约只估侵犯割状告或缺抗拒挽撤救药喻磨灭端倪少逆逾越避靠适吉誉吝玉含延咎歹听啻渊善谋均匀堪忍够太惹妙妥妨孕症孝术室完纳推冠积宣疑辩栗碴称屈挠屑干涉衡待很忙恶忿怎么怠急耻恭息悦惑惜惟想愉愧怍慌愤启懂懈怀材才紧招认扣抵拉舍也罢插揣冒搭撞南墙扩核支攻敢雷攀敬里吗需景智暇曾罪遇朽枉止况竞争辱求愈渝溶济左右袒困补爽特寂寞示弱找谢畏强疾徐痛痒冤符眠睦瞅董何厚云措活疲羞者轻玻璃祥兆禁���稂莠稳佛换答简结果盟绝缕途给谈否羁翼耐肖胫毋宁兴舒若菲莱痕迹窠臼虚衰脸兔撒鹰棺范该详讳抬泰让须眉象众赀账费灰赖奇虑训辍辨菽麦辛近送透逞徒速续逮捕遂遑违逊斧钺艰醉锈随观弃显饱脂肪使丏丐帮丒且慢末丕替桃宗王尊凉爵各图屋脊粮署录坛吾禄职胄袭君厦丗北壑桐疹损逢陵鹬丙寅戌氨腈唑纶辰酮脱氢酶醚丞丢现掉纱帽弄扯炮碗丠両丣坐存激肩臻蒂莲悖序驱丨丩丫挺杈髻鬟细介俄伊犁京尼布订普渡央委监察检查剂圈设警队斯督剩震境航舶革防托播促质版蝾螈锋研艺历残消频谱精密制造陲邮候埔坚压坜凹汇执府究邦俘摄寮彬狼岳肺肿庸英讯诊埋粒胞括控码韩暑枪枢砥澳哇牟寿甸钻探篇签缀缝继耳肯照妇埃悬璧轴柜台辣搁浅邪跑纤阮阳私囊魔丮丰姿采丱烧丳丵丶丷丸参寨朗桂瑞砂衷霞貌凤仆舰因嫌宰峰干络牌持旨祭祷簿编罚宾办丼丿乀乂乃乄仰慕盛旷留考验阔乆乇么丑麽乊湖燃乑乒乓乕乖僻忤戾离谬迕乗危肥劫除隙浪婿乙炔肠酰吡咯盐乚乛乜嘢卿玄宫尾狐龟塔嶷兄弟泉章霄钉耙乞扎哀怜恕讨乢乣乤乥乧乨乩童乪乫乭乳晕汁液瑶浆牙癌突窦罩腐胶猪酪蛋糕菌瘤乴乵乶乷乸乹乺乼乾俸冰嘉哕嚎坤妈尸垒旱枯涸俐渴潮涩煸豆燥爹瘦瘪癣瞪袋脆姜贝隆馏乿亀亁叫咕攘扔搞男砸窜蓬麻亃亄亅却亇迟典今临繁累卵奉婚聪躬巨与迁添裂副宿岁怪恶尕仑愣杆硅硫钛铀锰芑杂异钠砷胂磺琥珀舱棍簧胡茬盗浩盆贩郎腿亍洪亐互欠助勉惠操斥诿系户译亓墓碑刑铃卅渠缤纷斗米旗宪钒灯徽瘟祖拳福谷丰脏腑绑肉腌苓蕴桥铺霸颜闹判喷冈底蛙陉矿亖亘亜罕们娜桑那努哈喀弗烈曼松森杜氏杯奥琛敦戊穆圣裔汇薛孙亟亡佚虏羊牢奋释卷卸契媾感额睫缠谊趾塞挤纽阻还配驰庄亨洛祚亪享津沪畿郊慈菴枇杷膏亭阁锃丽亳亶亹诛初责翻疯偶杰丛稠妖拖寰居吸授慧蜗吞壮魅狗矛盾益渣患忧稀描猿梦暂涯畜祸缘沸搜引擎臣横纭谁混援蒸兽狮税剖亻亼亽亡什献刹邡么仂仃仄仆富怨仈仉毕昔晨壳绍仍仏仒仕宦仗欺恃腰叹叹炬梓讫施仙后琼逝仚仝仞仟悔仡佬偿填泊拓扑簇羔购顿钦佩发棻阃驭养亿儆尤借帧赈凌叙帖李柔刚沃眦睚戒讹取飨读仨仫仮著泳卧躺韶夏裁仳仵唯贤凭钓诞仿似宋佛讽伀硕盼鹅伄儅伈伉俪柯始娃迈戈坦堡帕茨萨庙玛莉莎藤霍姆伋伍奢胥廷芳豪伎俩侍汛勒希羲雏伐憩整谟闲闲伕伙伴颐伜伝伢叔恒兹恩翰伱伲侣伶俜悧鼬伸懒缩喇叭伹伺伻伽倻辐伾似佃伫布乔妮墨佉卢佌贷劣廉昂档浓矮伞洼缓耗胸谷迷挡率龋宅沫舍疗佐贰佑占优据铧尝呢须鲁晓佗佘余坪寺瓜铳僧蒙芒陀龛哼呕坊奸孽弊揖祟茧缚誓贼佝偻瞀佟你夺赶佡佢佣佤佧贾佪佫佯佰佱洁绩酿肴佴卷佶佷佸佹佺佻佼佽佾具唤窘坏娱怒慨硬习惯聋膨胀蔓骇贵痹侀侁侂侃侄侅鸿燕侇侈糜靡侉侌妾侏儒仓鼠侐侑侔仑侘侚链侜偎傍钴循柳葫芦附価侮骂蔑侯岩截蚀局贴壶嬛宴捷携桶笺酌俣狭膝狄俅俉俊俏俎俑俓俔谚俚俛黎健呈固墒增守康箱湿祐镖镳杠盒靖膜龄俞豹猎噪孚封札筒托衍鸽剪撰稿炼厂禊练缮葺俯瞰撑冲效俳俴俵俶俷俺备俾伥倂倅储卒惶敷猝逃颉蓄崇隐倌倏忽刺蜡烛噍嚼坍扁抽毙葱楣灌灶粪背薮卖赔闭霉腾倓倔幸倘倜傥倝借箸挹浇阅倡狂倢倣値倥偬倨傲倩匡嗣冲柝珍倬倭寇猩倮倶倷倹勤赞偁偃充伪吏嗓寐惺扮拱芫茜藉虢钞偈伟晶偌宕距析滤殿疼瘫注颇偓偕鸭歇滞偝偟偢忘怡旺偨偩逼偫偭偯偰偱偲侦缉蹄偷减惰漏窥窃偸偺迹傀儡傅傈僳骂篱傎奎琳迪叟芭傒傔傕伧悉荒傜傞傢傣芽逼佣婢傮睨寄檄诵谣颂伛担辜弓惨蒿悼疤傺傻屄臆巢泄箧羡盖轧颓傿㑩僄僇佥僊働僎侨僔僖僚僝伪僣僤侥僦猴偾僩僬僭僮僯僰雇僵殖签静僾僿征陇儁侬儃儇侩朴薄儊儋儌儍傧儓俦侪拟尽儜儞儤儦儩汰哉寡渥裕酷儭儱罐儳儵儹傩俨儽兀臬臲鹫允勋勋宙宵帅憝彝谐嫂阋畅沛溢盈饥赫凶悍狠猛顽愚妣斩秦遣鞭耀敏荣槃泽爆碟磁秃缆辉霁卤朵娄孜烽酱勃汀箕裘钳耶蒙蕾彻兑软遭黜兎児韵媳爸兕觥兖兙兛兜售鍪肚兝兞兟兡兢兣樽殓涅睡禀籍赘泌啡肽奸幕涵涝熵疚眷稃衬讧赴焕椒歼植跏没试误猜栖窗肋袖颊兪卦撇胡岐廓轿疸枫茴珑厕秩募勺吨寓斤历亩迫筷厘最淫螺韬兮宽匪筛襄赢轭复兲诈刃堰戎痞蚁饷它冀铸冂冃円冇冉册嫁厉砺竭醮冏牧冑冓冔冕冖冗冘冞冢窄抑诬冥冫烘菇蛰冷凝坨橇淇淋炭饼砖碛窖醋雕雹霜冱冶炉艳嘲峻滩淡漠煖飕饮冼冽凃凄怆梗凅凇净凊凋敝蒙凔凛遵汞脢凞几凢処凰凯凵凶焰凸折刷纹预丧喽奔巡榜殡芙蓉租笼辑鞘萃凼锯镬刁蛮刂娩崩批拆摊掰蘖骤歧颗秒袂赃勿嘱忌磋琢肤刈羽刎讼戮舂桨艇刓刖霹雳刜创犊刡恙墅帜筵致劫劫刨昏默攸尿欲熏润薰圭删刮痧铲刱刲刳刴刵踏磅戳柏槐绣芹苋猬舟铭鹄鹜劫剁剃辫刭锉履铅克剌姻咽哨廊掠桅沿召瞻翅赵卜渺茫郭剒剔剕沥剚愎毅讷才剜剥啄采剞剟剡剣剤䌽剐肾驶黏剰袍剀紊铲剸剺剽剿劁劂札劈啪柴扳啦刘奭姥夼昫涓熙禅禹锡翔雁鹗刽刿弩柄蜻蛉劒劓劖劘劙澜篑赏矶釜晋甜薪逐劦熔纣虐赤囚劬劭労劵效劻劼劾峭艮勅勇励勍勐腊脖庞漫饲荡粥辄勖勗勘骄馁碌泮雇捐竹骑殊阱绩朴恳谨剿勧勩勯勰劢勋勷劝惩慰诫谏勹芡践阑匁庇拯粟扎袱裹饺匆遽匈匉匊匋匍匐茎匏匕妆痰脓蛹斋苑烤蹈塘羌熊阀螳螂疆碚竿纬荷茵邙魏匚匜匝匟扶稷匣匦拢匸匹耦匽匾匿卂叮疮禧轸堤棚迢钧炼卄卆遐卉瓷盲瓶当胱腱裸卋卌卍卐怯污贱鄙龌龊陋卓溪唐梯渔陈枣泥漳浔涧梨芬谯赡辕迦郑単驴弈洽鳌卛占筮卝卞卟吩啉屎翠厄卣卨卪卬卮榫袄玺绶钮蚤惧殆笃耸卲帘帙绕恤卼卽厂厎厓厔厖厗奚厘厍厜厝谅厕厤厥厪腻孢厮厰厳厣厹厺粕垢芜菁厼厾叁悟茸薯叄吵笄悌哺讥坫
垄弧芯杠潜婴刍袁诘贪谍煽馈驳収岳缔灾贿骗叚叡吻拦蘑蜜诀燧玩砚筝椎蔺铜逗骊另觅叨唠谒杵姓喊嚷嚣咚咛塑寻恼憎擦只泣渗蝠叱吒咄咤喝籀黛舵舷叵叶铎懿昭穰苴辽叻叼吁堑嫖赌瞧爬众抒吅吆夥卺橡涤抱纵摩郡唁坠扇篮膀袜颈吋忾谘酬哭妓媛暗表缰迩妃羿絮蕃浑拐葵暮隅吔吖啶嗪戚吜啬噬咽吟哦咏吠吧唧嗒咐吪隽咀征燐苞茹钙哧吮吰吱嘎吲哚吴栋娇窟孟箫忠晗淞阖闾趼宇呐睛嘘拂捧疵熄竽笛糠吼吽呀吕韦蒙呃呆笨呇贡呉罄呋喃呎呏呔呠呡痴呣呤呦呧瑛眩扒晬淑姬瑜璇鹃呪呫哔嚅嗫呬呯呰呱呲咧噌钝呴呶呷呸呺呻哱咻啸噜吁坎坷逻呿咁咂咆哮咇咈咋蟹煦珅蔼咍咑咒诅咔哒嚓咾哝哩喱咗咠咡咢咣咥咦咨嗟询咩咪咫啮啮咭咮咱咲咳呛嗽咴啕咸咹咺呙喉咿婉恸悯赋矜绿茗蓝哂抢瞒哆嗦啰噻啾滨彗哋哌哎唷哟哏哐哞哢哤哪里哫啼喘哰哲萎蚌哳咩哽哿呗唅唆唈唉唎唏哗尧棣殇璜睿肃唔睇唕吣唞唣喳唪唬唰喏唲唳唵嘛唶唸唹唻唼唾唿啁啃鹦鹉啅埠栈榷祺铺鞅飙啊啍啎啐啓啕啖啗啜哑祈啢衔啤啥啫啱啲啵啺饥啽噶昆沁喁喂喆裙喈咙喋喌喎喑喒喓喔粗喙幛庆滋鹊喟喣喤喥喦喧骚喨喩梆吃葡萄喭驼挑吓碰枞瓣纯疱藻趟铬喵営喹喺喼喿嗀嗃嗄嗅嗈嗉嗊嗍嗐嗑嗔诟嗕嗖嗙嗛嗜痂癖嗝嗡嗤嗥嗨唢嗬嗯嗰嗲嗵叽嗷嗹嗾嗿嘀嘁嘂嘅惋嘈峪禾荫啀嘌嘏嘐嘒啯啧嘚唛嘞嘟囔嘣嘥嘦嘧嘬嘭这谑严敞馋松哓嘶嗥呒虾嘹嘻啴嘿噀噂噅噇噉噎噏噔噗噘噙噚咝噞噢噤蝉皿噩噫噭嗳噱哙噳嚏涌洒欲巫霏噷噼嚃嚄嚆抖哜尝嚔苏嚚嚜嚞嚟呖嚬嚭嚮嚯亸喾饬按竣苛嚵嘤啭冁呓膪谦囍囒囓囗囘萧酚飘溅谛囝溯眸纥銮鹘囟殉囡団囤囥囧囨囱囫囵囬囮囯囲図囶囷囸囹圄圉拟囻囿圀圂圃圊粹蠹赦圌垦圏滚鲱凿枘圕圛圜圞坯埂壤骸炕祠窑豚绅魠鲮鳖圧握圩圪垯圬圮圯炸岬幔毯祇窨菩溉圳圴圻圾坂坆沾坋坌舛壈昆垫墩椅坒坓坩埚坭坰坱坳坴坵坻坼杨挣涎帘垃垈垌垍垓垔垕垗垚垛垝垣垞垟垤垧垮垵垺垾垿埀畔埄埆埇埈埌殃隍埏埒埕埗埜垭埤埦埧埭埯埰埲埳埴埵埶绋埸培怖桩础辅埼埽堀诃侄庑堃堄摧磐贞韧砌堈堉垩堋堌堍堎垴堙堞堠礁堧堨舆堭堮蜓摘堲堳堽堿塁塄塈煤茔棵塍垲埘塓绸塕鸦沽虱塙冢塝缪塡坞埙塥塩塬塱场螨塼塽塾塿墀墁墈墉墐夯増毁墝墠墦渍钵墫墬堕墰墺墙橱壅壆壊壌壎壒榨蒜壔壕壖圹垆壜壝垅壡壬壭壱売壴壹壻壸寝壿夂夅夆変夊夌漱邑夓腕泄甥御骼夗夘夙衮瑙妊娠醣枭珊莺鹭戗幻魇夤蹀秘擂鸫姚宛闺屿庾挞拇賛蛤裨菠氅漓捞湄蚊霆鲨箐篆篷荆肆舅荔鲆巷惭骰辟邱镕镰阪漂烩鲵鲽鳄鸨胪鹏妒峨谭枰晏玑癸祝秤竺牡籁恢罡蝼蝎赐绒御梭夬夭砣榆怙枕夶夹馅奄崛葩谲奈贺祀赠奌奂奓奕䜣詝奘奜奠奡奣陶奨奁魁奫奬奰娲孩贬隶酥宄狡猾她姹嫣妁毡荼皋膻蝇嫔妄妍嫉媚娆妗趣妚妞妤碍妬娅妯娌妲妳妵妺姁姅姉姗姒姘姙姜姝姞姣姤姧姫姮娥姱姸姺姽婀娀诱慑胁娉婷娑娓娟娣娭娯娵娶娸娼婊婐婕婞婤婥溪孺婧婪婬婹婺婼婽媁媄媊媕媞媟媠媢媬媮妫媲媵媸媺媻媪眯媿嫄嫈袅嫏嫕妪嫘嫚嫜嫠嫡嫦嫩嫪毐嫫嫬嫰妩嫺娴嫽嫿妫嬃嬅嬉耍婵痴艳嬔嬖嬗嫱袅嫒嬢嬷嬦嬬嬭幼嬲嬴婶嬹嬾嬿孀娘孅娈孏曰癫屏孑孓雀孖斟篓谜摺孛矻鸠崮轲祜鸾孥邈毓棠膑孬孭孰孱孳孵泛罔衔孻孪宀宁冗拙株薇掣抚琪瓿榴谧弥宊濂祁瑕宍宏碁宓邸谳実潢町宥宧宨宬徵崎骏掖阙臊煮禽蚕宸豫寀寁寥寃檐庶寎暄碜寔寖寘寙寛寠苫寤肘洱滥蒗陕核寪弘绰螽宝擅疙瘩晷対檐専尃尅赎绌缭畴衅尌峙醌襟痲碧屁昊槌淘恵瀑牝畑莓缸羚觑蔻脏躁尔尓锐尗尙尜尟尢��尨尪尬尭尰擒尲尶尴尸尹潽蠖蛾尻扣梢蚴鳍脬蹲屇屌蚵屐屃挪屖屘屙屛屝屡屣峦嶂岩舄屧屦屩屪屃屮戍驻钾崖嵛巅旮旯楂榄榉芋茱萸靛麓屴屹屺屼岀岊岌岍阜岑彭巩岒岝岢岚岣岧岨岫岱岵岷峁峇峋峒峓峞峠嵋峨峰峱岘峹峿崀崁崆祯崋崌崃岖昆崒崔嵬巍萤颢崚崞崟崠峥巆崤崦崧殂岽崱崳崴崶崿嵂嵇嵊泗嵌嵎嵒嵓岁嵙嵞嵡嵩嵫嵯嵴嵼嵾嵝崭崭晴嶋嶌嶒嶓嵚崂嶙嶝嶞峤嶡嶢峄嶨嶭嶮嶰嶲岙嵘巂巃巇巉岿巌巓巘巛滇芎巟巠弋回巣巤炊擘蜥蟒蛊觋巰蜀彦淖杏茂甫楞巻巽帼巿帛斐鲫蕊帑帔帗帚琉汶帟帡帣帨裙帯帰帷帹暆帏幄帮幋幌幏帻幙帮幞幠幡幢幦幨幩幪帱幭幯幰遥蹉跎馀庚鉴幵幷稚邃庀庁広庄庈庉笠庋跋庖牺庠庤庥鲸庬庱庳庴庵馨衢庹庿廃厩廆廋廌廎廏廐廑廒荫廖廛厮搏锣廞弛袤廥廧廨廪廱绵踵髓廸迫瓯邺廻廼廾廿躔弁皱弇弌弍弎弐弑吊诡憾荐弝弢弣弤弨弭弮弰弪霖繇焘斌旭溥骞弶弸弼弾彀彄别累纠强彔彖彘彟彟陌彤贻彧绘虹彪炳雕蔚鸥彰瘅彲彳彴仿彷徉徨彸彽踩敛旆徂徇徊渭畲铉裼従筌徘徙徜徕膳苏萌渐徬徭醺徯徳徴潘徻徼忀瘁胖燎怦悸颤扉犀澎湃砰恍惚绞隘忉惮挨饿忐忑忒忖応忝忞耿忡忪忭忮忱忸怩忻悠懑怏遏怔怗怚怛怞怼黍讶怫怭懦怱怲恍怵惕怸怹恁恂恇恉恌恏恒恓恔恘恚恛恝恞恟恠恣恧眄恪恫恬澹恰恿悀悁悃悄悆悊悐悒晦悚悛悜悝悤您悩悪悮悰悱凄恻德悴怅惘闷悻悾惄愫钟蒐惆惇惌惎惏惓惔惙惛耄惝疟浊恿惦德恽惴蠢惸拈愀愃愆愈愊愍愐愑愒愓愔愕恪氓蠢騃昵惬赧悫愬愮愯恺愼慁恿慅慆慇霭慉慊愠慝慥怄怂慬慱悭慴慵慷戚焚憀灼郁憃惫憋憍眺捏轼愦憔憖憙憧憬憨憪憭怃憯憷憸憹憺懃懅懆邀懊懋怿懔懐懞懠懤懥恹懫懮懰懱毖懵遁梁雍忏懽戁戄戆戉戋戕戛戝戛戠戡戢戣戤戥戦戬戭戯轰戱披菊牖戸戹戺戻卯戽锹扂楔扃扆扈扊杖牵绢铐镯赉扐搂搅烊盹瞌跟趸镲靶鼾払扗玫腮扛扞扠扡扢盔押扤扦扱罾揄绥鞍郤窾扻扼扽抃抆抈抉抌抏瞎抔缳缢擞抜拗択抨摔歉蹿牾抶抻搐泵菸拃拄拊髀抛拌脯拎拏拑擢秧沓曳挛迂拚拝拠拡拫拭拮踢拴拶拷攒拽掇芥橐簪摹疔挈瓢骥捺蹻挌挍挎挐拣挓挖掘浚挙揍聩挲挶挟挿捂捃捄捅捆捉捋胳膊揎捌捍捎躯蛛捗捘捙捜捥捩扪捭据捱捻捼捽掀掂抡臀膘掊掎掏掐笙掔掗掞棉芍掤搪阐掫掮掯揉掱掲掽掾揃揅揆搓揌诨揕揗揘揜揝揞揠揥揩揪揫橥遒麈揰揲揵揶揸背揺搆搉搊搋搌搎搔搕撼橹捣搘搠搡搢搣搤搥搦搧搨搬楦裢讪赸掏搰搲搳搴揾搷搽搾搿摀摁摂摃摎掴摒摓跤摙摛掼摞摠摦喉羯摭摮挚摰摲抠摴抟摷掺摽撂撃撅稻撊撋挦锏泼撕撙撚㧑挢撢掸撦撅撩撬撱朔揿蚍蜉挝捡擀掳闯擉缶觚擐擕擖擗擡擣擤澡腚擧擨擩擫擭摈拧撷擸撸擽擿攃摅撵攉攥攐攓撄搀撺每攩攫辔澄攮攰攲攴轶攷砭讦攽碘敁敃敇敉叙敎筏敔敕敖闰诲敜煌敧敪敳敹敺敻敿斁衽斄牒绉诌斉斎斓鹑谰驳鳢斒筲斛斝斞斠斡斢斨斫斮晾沂潟颖绛邵斲斸釳於琅斾斿旀旗旃旄涡旌旎旐旒旓旖旛旝旟旡旣浴旰獭魃旴时旻旼旽昀昃昄昇昉晰躲澈熹皎皓矾昑昕昜昝昞昡昤晖笋昦昨是昱昳昴昶昺昻晁蹇隧蔬髦晄晅晒晛晜晞晟晡晢晤晥曦晩萘莹顗晿暁暋暌暍暐暔暕煅旸暝暠暡曚暦暨暪朦胧昵暲殄冯暵暸暹暻暾曀晔昙曈曌曏曐暧曘曙曛叠昽曩骆曱甴肱曷牍禺锟曽沧耽朁朅朆杪栓夸竟粘绦朊膺朏朐朓朕朘朙瞄觐溘饔飧朠朢朣栅椆淀虱朩朮朰朱炆璋钰炽鹮朳槿朵朾朿杅杇杌陧欣钊湛漼楷瀍煜玟缨翱肇舜贽适逵杓杕杗杙荀蘅杝杞脩珓筊杰榔狍閦颦缅莞杲杳眇杴杶杸杻杼枋枌枒枓衾葄翘纾逋枙狸桠枟槁枲枳枴枵枷枸橼枹枻柁柂柃柅柈柊柎某柑橘柒柘柙柚柜柞栎柟柢柣柤柩柬柮柰柲橙柶柷柸柺査柿栃栄栒栔栘栝栟柏栩栫栭栱栲栳栴檀栵栻桀骜桁镁桄桉桋桎梏椹葚桓桔桕桜桟桫椤桭杯桯桲桴桷桹湘溟梃梊梍梐潼栀枧梜梠梡梣梧梩梱梲梳梴梵梹棁棃樱棐棑棕榈簑绷蓑枨棘棜棨棩棪棫棬棯棰棱棳棸棹椁棼碗椄苕椈椊椋椌椐椑椓椗検椤椪椰椳椴椵椷椸椽椿楀匾楅篪楋楍楎楗楘楙楛楝楟楠楢楥桢楩楪楫楬楮楯楰梅楸楹楻楽榀榃榊榎槺榕榖榘榛狉莽搒笞榠榡榤榥榦榧杩榭榰榱梿霰榼榾桤槊闩槎槑槔槖様槜槢槥椠槪槭椮槱槲槻槼槾樆樊樏樑樕樗樘樛樟樠樧樨権樲樴樵猢狲桦樻罍樾樿橁橄橆桡笥龠橕橚橛辆椭橤橧竖膈跨橾橿檩檃檇柽檍檎檑檖檗桧槚檠樯檨檫檬梼槟檴檵柠棹櫆櫌栉櫜椟櫡槠栌枥榇栊櫹棂茄櫽欀欂欃欐欑栾欙棂溴欨欬欱欵欶欷歔欸欹欻欼欿歁歃歆艎歈歊莳蝶歓歕歘歙歛歜欤歠蹦诠镶蹒跚升陟歩歮歯歰歳歴璞歺瞑歾殁夭殈殍殑殗殜殙殛殒殢殣殥殪殚僵殰殳荃殷殸殹蛟殻肴谤殴毈毉喂毎���蕈毗毘毚茛邓毧毬毳毷毹毽毾毵牦氄氆靴氉氊氇氍氐聊氕氖気氘氙氚氛氜氝氡汹焊痉氤氲氥氦铝锌氪烃氩铵痤汪浒漉痘盂碾菖蒲蕹蛭螅氵冰氹氺氽烫氾氿渚汆汊汋汍汎汏汐汔汕褟汙汚汜蓠沼秽蔑汧汨汩汭汲汳汴堤汾沄沅沆瀣沇沈葆浸沦湎溺痼疴沌沍沏沐沔沕沘浜畹砾沚沢沬沭沮沰沱灢沴沷籽沺烹濡洄泂肛泅泆涌肓泐泑泒泓泔泖泙泚泜泝泠漩馍涛粼泞藓鳅泩泫泭泯铢泱泲洇洊泾琵琶荽蓟箔洌洎洏洑潄濯洙洚洟洢洣洧洨洩痢滔洫洮洳洴洵洸洹洺洼洿淌蜚浄浉浙赣渫浠浡浤浥淼瀚浬浭翩萍浯浰蜃淀苔蛞蝓蜇螵蛸煲鲤浃浼浽溦涂涊涐涑涒涔滂莅涘涙涪涫涬涮涴涶涷涿淄淅淆淊凄黯淓淙涟淜淝淟淠淢淤渌淦淩猥藿亵淬淮淯淰淳诣涞纺淸淹炖癯绮渇済渉渋渓渕涣渟渢滓渤澥渧渨渮渰渲渶渼湅湉湋湍湑湓湔黔湜湝浈湟湢湣湩湫湮麟湱湲湴涅満沩溍溎溏溛舐漭溠溤溧驯溮溱溲溳溵溷溻溼溽溾滁滃滉滊荥滏稽滕滘汇滝滫滮羼耷卤滹浐煎漈漊漎绎漕漖漘漙沤漜漪漾漥漦漯漰溆漶漷濞潀颍潎潏潕潗潚潝潞潠潦祉疡潲潵滗潸潺潾涠澁澂澃澉澌澍澐澒澔澙渑澣澦澧澨澫澬浍澰澴澶澼熏郁濆濇濈濉濊貊濔疣濜濠濩觞浚濮盥潍濲泺瀁滢渎渖瀌浏瀒瀔濒泸瀛潇潆瀡潴泷濑瀬弥潋瀳瀵瀹瀺瀼沣滠灉灋灒漓灖灏灞灠滦灥灨滟灪蜴灮烬獴灴灸灺炁炅鱿炗炘炙炤炫疽烙钎炯炰炱炲炴炷毁炻烀烋瘴鲳烓烔焙烜烝烳饪烺
焃焄耆焌焐焓焗焜焞焠焢焮焯焱焼煁煃煆煇煊熠煍熬煐炜煕暖熏硷霾煚煝煟煠茕矸煨琐炀萁煳煺煻熀熅熇熉罴荧穹炝熘熛熜稔谙烁熤熨熯熰眶蚂颎熳熸熿燀烨燂燄盏燊燋燏燔隼燖焖燠燡灿燨燮燹燻燽燿爇爊爓爚爝爟爨蟾爯爰为爻丬爿牀牁牂牄牋窗牏牓窗釉牚腩蒡虻牠虽蛎牣牤牮牯牲牳牴牷牸牼绊牿靬犂犄犆犇犉犍犎犒荦犗犛犟犠犨犩犪犮犰狳犴犵犺狁甩狃狆狎狒獾狘狙黠狨狩狫狴狷狺狻豕狈蜘猁猇猈猊猋猓猖獗猗猘狰狞犸猞猟獕猭猱猲猳猷猸猹猺玃獀獃獉獍獏獐獒毙獙獚獜獝獞獠獢獣獧鼇蹊狯猃獬豸狝獯鬻獳犷猕猡玁菟玅玆玈珉糁禛郅玍玎玓瓅玔玕玖玗玘玞玠玡玢玤玥玦珏瑰玭玳瑁玶玷玹玼珂珇珈瑚珌馐馔珔珖珙珛珞珡珣珥珧珩珪佩珶珷珺珽琀琁陨玡琇琖琚琠琤琦琨琫琬琭琮琯琰琱琲琅琴珐珲瑀瑂瑄瑉玮瑑瑔瑗瑢瑭瑱瑲瑳瑽瑾瑿璀璨璁璅璆璈琏璊璐璘璚璝璟璠璡璥瑷璩璪璫璯璲玙璸璺璿瓀璎瓖瓘瓒瓛脐瓞瓠瓤瓧瓩瓮瓰瓱瓴瓸瓻瓼甀甁甃甄甇甋甍甎甏甑甒甓甔瓮甖甗饴蔗甙诧钜粱盎锈团甡褥産甪甬甭甮宁铠甹甽甾甿畀畁畇畈畊畋畎畓畚畛畟鄂畤畦畧荻畯畳畵畷畸畽畾疃叠疋疍疎箪疐疒疕疘疝疢疥疧疳疶疿痁痄痊痌痍痏痐痒痔痗瘢痚痠痡痣痦痩痭痯痱痳痵痻痿瘀痖瘃瘈瘉瘊瘌瘏瘐痪瘕瘖瘙瘚瘛疭瘜瘝瘗瘠瘥瘨瘭瘆瘯瘰疬瘳疠瘵瘸瘺瘘瘼癃痨痫癈癎癐癔癙癜癠疖症癞蟆癪瘿痈発踔绀蔫酵皙砬砒翎翳蔹钨镴皑鹎驹暨粤褶皀皁荚皃镈皈皌皋皒朱皕皖皘皜皝皞皤皦皨皪皫皭糙绽皴皲皻皽盅盋碗盍盚盝踞盦盩秋千盬盭眦睁瞤盯盱眙裰盵盻睐眂眅眈眊県眑眕眚眛眞眢眣眭眳眴眵眹瞓眽郛睃睅睆睊睍睎困睒睖睙睟睠睢睥睪睾睯睽睾眯瞈瞋瞍逛瞏瞕瞖眍䁖瞟瞠瞢瞫瞭瞳瞵瞷瞹瞽阇瞿眬矉矍铄矔矗矙瞩矞矟矠矣矧矬矫矰矱硪碇磙罅舫阡、矼矽礓砃砅砆砉砍砑砕砝砟砠砢砦砧砩砫砮砳艏砵砹砼硇硌硍硎硏硐硒硜硖砗磲茚钡硭硻硾碃碉碏碣碓碔碞碡碪碫碬砀碯碲砜碻礴磈磉磎硙磔磕磖磛磟磠磡磤磥蹭磪磬磴磵磹磻硗礀硚礅礌礐礚礜礞礤礧礮砻礲礵礽礿祂祄祅祆禳祊祍祏祓祔祕祗祘祛祧祫祲祻祼饵脔锢禂禇禋祦禔祎隋禖禘禚禜禝禠祃禢禤禥禨禫祢禴禸秆秈秊闱飒秋秏秕笈蘵赁秠秣秪秫秬秭秷秸稊稌稍稑稗稙稛稞稬秸稲稹稼颡稿穂穄穇穈穉穋稣贮穏穜穟秾穑穣穤穧穨穭穮穵穸窿阒窀窂窅窆窈窕窊窋窌窒窗窔窞窣窬黩蹙窑窳窴窵窭窸窗竁竃竈竑竜并竦竖篦篾笆鲛竾笉笊笎笏笐靥笓笤箓笪笫笭笮笰笱笲笳笵笸笻筀筅筇筈筎筑筘筠筤筥筦笕筒筭箸筰筱筳筴宴筸箂个箊箎箑箒箘箙箛箜篌箝箠箬镞箯箴箾篁筼筜篘篙篚篛篜篝篟篠篡篢篥篧篨篭篰篲筚篴篶篹篼箦簁簃簆簉簋簌簏簜簟簠簥簦簨簬簰簸簻籊藤籒籓籔签籚篯箨籣籥籧笾簖籫籯芾麴籵籸籹籼粁秕粋粑粔粝粛粞粢粧粨粲粳稗粻粽辟粿糅糆糈糌糍糒糔萼糗蛆蹋糢糨糬粽糯糱籴粜糸糺紃蹼鲣霉纡纨绔纫闽襻紑纰纮锭鸢鹞纴紞紟扎紩紬绂绁纻紽紾绐絁絃絅経絍绗絏缡褵絓絖絘絜绚絣螯絪絫聒絰絵绝絺絻絿綀绡綅绠绨绣綌綍綎捆綖綘継続缎绻綦綪线綮綯绾罟蝽綷縩绺绫緁绲緅緆缁绯緌緎総緑绱緖缃缄缂绵缗緤褓缌纂緪緰缑缈缏缇縁縃縄萦缙缒縏缣縕缞縚缜缟缛縠縡縢縦绦縯縰骋缧縳纤缦絷缥縻衙縿繄缫繈繊繋繐缯繖繘繙繠缋繣繨缰缲繸繻缱纁纆纇缬缵纩纑纕缵纙纚纛缾罃罆坛罋罂罎罏罖罘罛罝罠罣罥罦罨罫罭锾罳罶罹罻罽罿羂羃羇芈蕉51鸵羑羖羌羜羝羢羣羟羧羭羮羰羱羵羶羸藜鲐翀翃翅翊翌翏翕翛翟翡翣翥翦跹翪翫翚翮翯翱翽翾翿板饕鸹锨耋耇耎耏专耒耜耔耞耡耤耨耩耪耧耰鬓耵聍聃聆聎聝聡聦聱聴聂聼阈聿肄肏肐肕腋肙肜肟肧胛肫肬肭肰肴肵肸肼胊胍胏胑胔胗胙胝胠铨胤胦胩胬胭胯胰胲胴胹胻胼胾脇脘脝脞脡脣脤脥脧脰脲脳腆腊腌臜腍腒腓胨腜腠脶腥腧腬腯踝蹬镣腴腶蠕诽膂腽嗉膇膋膔腘膗膙膟黐膣膦膫膰膴膵膷脍臃臄臇臈臌臐臑臓膘臖臙臛臝臞臧蓐诩臽臾臿舀舁鳑鲏舋舎舔舗馆舝舠舡舢舨舭舲舳舴舸舺艁艄艅艉艋艑艕艖艗艘艚艜艟艣舣艨艩舻艬艭荏艴艳艸艹艻艿芃芄芊萰陂藭芏芔芘芚蕙芟芣芤茉芧芨芩芪芮芰鲢芴芷芸荛豢芼芿苄苒苘苙苜蓿苠苡苣荬苤苎苪镑苶苹苺苻苾茀茁范蠡萣茆茇茈茌茍茖茞茠茢茥茦菰茭茯茳藨茷藘茼荁荄荅荇荈菅蜢鸮荍荑荘豆荵荸荠莆莒莔莕莘莙莚莛莜莝莦莨菪莩莪莭莰莿菀菆菉菎菏菐菑菓菔芲菘菝菡菢菣菥蓂菧菫毂蓥菶菷菹醢菺菻菼菾萅萆苌萋萏萐萑萜萩萱萴莴扁萻葇葍葎葑荭葖葙葠葥苇葧葭药葳葴葶葸葹葽蒄蒎莼茏薹莅蒟蒻蒢蒦蒨蒭藁蒯蒱鉾蒴蒹蒺蒽荪蓁蓆蓇蓊蓌蓍蓏蓓蓖蓧蓪蓫荜跣藕苁蓰蓱莼蓷蓺蓼蔀蔂蔃蔆蔇蔉蔊蔋蔌蔎蔕蔘蔙蒌蔟锷蒋雯茑蔯蔳麻蔵蔸蔾荨蒇蕋蕍荞蕐蕑芸莸蕖蕗蕝蕞蕠蕡蒉蕣蕤蕨蕳蓣蕸蕺蕻薀薁薃薅薆荟薉芗薏薐蔷薖薘剃谔钗薜薠薢薤薧薨薫薬薳薶薷薸薽薾薿藄藇藋荩藐藙藚藟藦藳藴苈藷藾蘀蘁蕲苹蘗蘘蘝蘤蘧蘩蘸蘼虀虆虍蟠虒虓虖虡虣虥虩虬虰蛵蛇虷鳟虺虼蚆蚈蚋蚓蚔蚖蚘蚜蚡蚣蚧蚨蚩蚪蚯蚰蜒蚱蚳蚶蚹蚺蚻蚿蛀蛁蛄蛅蝮蛌蛍蛐蟮蛑蛓蛔蛘蛚蛜蛡蛣蜊蛩蛱蜕螫蜅蚬蜈蝣蜋蜍蜎蜑蠊蜛饯蜞蜣蜨蜩蜮蜱蜷蜺蜾蜿蝀蝃蝋蝌蝍蝎蝏蝗蝘蝙蝝鲼蝡蝤蝥猿蝰虻蝲蝴蝻螃蠏蛳螉螋螒螓螗螘螙螚蟥螟螣螥螬螭䗖螾螀蟀蟅蝈蟊蟋蟑蟓蟛蟜蟟蟢虮蟨蟪蟭蛲蟳蛏蟷蟺蟿蠁蠂蠃虿蠋蛴蠓蚝蠗蠙蠚蠛蠜蠧蟏蠩蜂蠮蠰蠲蠵蠸蠼蠽衁衄衄衇衈衉衋衎衒同衖胡衞裳钩衭衲衵衹衺衿袈裟袗袚袟袢袪袮袲袴袷袺袼褙袽裀裉袅裋夹裍裎裒裛裯裱裲裴裾褀褂褉褊裈褎褐褒褓褔褕袆褚褡褢褦褧褪褫袅褯褰褱裆褛褽褾襁褒襆裥襉襋襌襏襚襛襜裣襞襡襢褴襦襫襬襭襮襕襶襼襽襾覂覃覅霸覉覊覌覗觇覚覜觍觎覧覩觊觏覰観觌觔觕觖觜觽觝觡酲觩觫觭觱觳觯觷觼觾觿言赅讣訇訏訑訒诂讬訧訬訳訹证訾詀詅诋毁詈詊讵詑诒诐詗诎察詨诜詶詸詹詻诙诖誂誃诔锄诓誋诳诶悖誙诮诰誧説読誯谇訚谄谆諆諌诤诹诼諕谂谀諝谝諟喧谥諴諵谌谖誊謆謇歌謍謏謑谡谥謡謦謪谪讴謷謼谩哗譅譆譈譊讹譒撰谮鑫譞噪譩谵譬譱譲谴譸譹谫讅讆詟䜩雠讐谗谶讙谠讟谽豁豉豇岂豊豋豌豏豔豞豖豗豜豝豣豦豨豭豱豳豵豶豷豺豻貅貆狸猊貔貘䝙貜貤餍贳餸贶贲赂賏赊赇赒賝赓赕賨赍斗賮賵賸赚赙赜赟贉赆赑贕赝赬赭赱赳迄趁趂趄趐趑趒趔趡趦趫趮趯趱趴趵趷趹趺趿跁跂跅跆踬跄跐跕跖跗跙跛跦跧跩跫跬跮跱跲跴跺跼跽踅踆踈踉踊踒踖踘踜踟躇蹰踠踡踣踤踥踦踧跷踫踮逾踱踊踶踹踺踼踽躞蹁蹂躏蹎蹐蹓蹔跸蹚蹜蹝迹蹠蹡蹢跶蹧蹩蹪蹯鞠蹽躃躄躅踌跻躐踯跞躘躙躗躝躠蹑躜躧躩躭躰躬躶軃軆辊軏轫軘軜軝腭転軥軨軭軱轱辘軷轵轺軽軿輀輂辇辂辁輈挽輗辄辎辋輠輤輬輭輮辏輴輵輶輹輼辗辒轇轏轑轒辚轕轖轗轘轙轝轞轹轳罪辣辞辵辶辺込辿迅迋迍麿迓迣迤逦迥迨迮迸迺迻迿逄逅逌逍逑逓迳逖逡逭逯逴逶逹遄遅侦遘遛遝遢遨遫遯遰遴绕遹遻邂邅邉邋邎邕邗邘邛邠邢邧邨邯郸邰邲邳邴邶邷邽邾邿郃郄郇郈郔郕郗郙郚郜郝郞郏郠郢郪郫郯郰郲郳郴郷郹郾郿鄀鄄郓鄇鄈鄋鄍鄎鄏鄐鄑邹邬鄕郧鄗鄘鄚鄜鄞鄠鄢鄣鄤鄦鄩鄫鄬鄮鄯鄱郐鄷鄹邝鄻鄾鄿酃酅酆酇郦酊酋酎酏酐酣酔酕醄酖酗酞酡酢酤酩酴酹酺醁醅醆醊醍醐醑醓醖醝酝醡醤醨醪醭醯醰酦醲醴醵醸醹醼醽醾釂酾酽釆釈鲈镏阊钆钇钌钯钋鼢鼹钐钏釪釬釭釱钍釸钕钫鈃钭鈆鈇钚鈊鈌钤钣鈒鈤钬钪鈬铌铈钶铛钹铍钸钿鉄鉆铊铇鉌铋鉏铂钷铆钵鉥钲鉨钼钽鉱鉲鉶铰铒鉼铪銍銎铣銕镂铫铦铑铷銤铱铟銧铥铕铯銭銰焊銶锑锉汞鋂锒鋆鋈鋊铤鋍铗鋐鋑鋕鋘鋙锊锓锔锇铓鋭铖锆锂铽鋳鋹鋺鉴镚钎錀锞锖锫锩錍铔锕錔锱铮锛錞锬锜錤錩錬録铼錼锝钔锴鍉镀鍏鍐铡鍚锻锽锸锲锘鍫鍭鍱鍴锶鍹锗针锺锿镅鎉鎋鎌鎍鎏鎒鎓鎗镉鎚鎞镃鎤铩锼鎭鎯镒镍鎴镓��鎹镎镟鏊镆镠镝鏖铿锵鏚镗镘镛鏠鏦錾镤鏸镪鏻鏽鏾铙鐄鐇鐏铹镦镡鐗馗镫镢镨鐡锎镄鐩镌鐬鐱镭鐶鐻鐽镱鑀鑅镔鑐鑕鑚鑛鑢鑤镥鑪镧鑯鑱鑴鑵镊镢钃镻闫闬闶闳閒闵閗閟阂関合閤哄阆閲阉閺阎阏阍阌暗闉阕阗闑闒闿闘闚阚闟闠闤闼阞阢阤阨阬阯阹阼阽陁陑陔陛陜陡陥陬骘陴険陼陾阴隃隈隒隗隞隠隣隤隩隮隰颧隳隷隹雂雈雉雊雎雑雒雗雘雚雝雟雩雰雱驿霂霅霈霊沾霒霓霙霝霢霣霤霨霩霪霫霮靁叇叆靑靓靣腼靪靮靰靳靷靸靺靼靿鞀鞃鞄鞍鞗鞙鞚鞝鞞鞡鞣鞨鞫鞬鞮鞶鞹鞾鞑韅鞯驮韍韎韔韖韘韝韫韡韣韭韭韱韹韺頀刮頄顸顼頍颀颃颁頖頞頠頫頬颅頯頲颕頼悴顋顑颙颛颜顕顚顜颟顣颥颞飐飑台飓颸飏飖颽颾颿飀飂飚飌翻飡飣饲飥饨饫飮飧飶餀餂饸饹餇餈饽哺馂餖餗餚馄馃餟餠餤餧餩餪餫糊餮糇餲饧馎糕饩馈馊馌馒饇馑馓膳饎饐饘饟馕馘馥馝馡馣骝骡馵馹駃駄駅駆駉駋驽駓驵駗骀驸駜骂骈駪駬骃駴骎駹駽駾騂騄骓騆騉騋骒骐麟騑騒験騕骛騠騢騣騤騧骧騵驺骟騺蓦骖骠骢驆驈骅驌骁驎骣驒驔驖驙驦驩驫骺鲠骫骭肮骱骴骶骷髅骾髁髂髄髆膀髇髑髌髋髙髝髞髟髡髣髧髪髫髭髯髲髳髹髺髽髾鬁鬃鬅鬈鬋鬎鬏鬐鬑鬒鬖鬗鬘鬙鬠鬣斗鬫鬬阄鬯鬰鬲鬵鬷魆魈魊魋魍魉魑魖鳔魛魟魣魦魨魬鲂魵魸鮀鲅鮆鲧鲇鲍鲋鮓鲒鲕鮟鱇鮠鮦鮨鲔鲑鮶鮸鮿鲧鯄鯆鲩鯈鲻鯕鲭鲞鯙鯠鲲鯥鲰鲶鳀鯸鳊鲗䲠鹣鳇鰋鳄鳆鰕鰛鰜鲥鰤鳏鰦鳎鳐鳁鳓鰶鲦鲡鰼鰽鱀鱄鳙鱆鳕鱎鱐鳝鳝鳜鲟鲎鱠鳣鱨鲚鱮鱲鱵鱻鲅鳦凫鳯鳲鳷鳻鴂鴃鴄鸩鴈鴎鸰鴔鴗鸳鸯鸲鹆鸱鴠鴢鸪鴥鸸鹋鴳鸻鴷鴽鵀鵁鸺鹁鵖鵙鹈鹕鹅鵟鵩鹌鵫鵵鵷鵻鹍鶂鶊鶏鶒鹙鶗鶡鶤鶦鶬鶱鹟鶵鶸鶹鹡鶿鹚鷁鷃鷄鷇䴘䴘鷊鷏鹧鷕鹥鸷鷞鷟鸶鹪鹩鷩鷫鷭鹇鹇鸴鷾䴙鸂鸇䴙鸏鸑鸒鸓鸬鹳鸜鹂鹸咸鹾麀麂
麃麄麇麋麌麐麑麒麚麛麝麤麸面麫麮麯麰麺麾黁黈黉黢黒黓黕黙黝黟黥黦黧黮黰黱黪黶黹黻黼黾鼋鼂鼃鼅鼈鼍鼏鼐鼒冬鼖鼙鼚鼛鼡鼩鼱鼪鼫鼯鼷鼽齁齆齇齈齉齌赍齑龀齕齗龅齚龇齞龃龉龆齢出齧齩齮齯齰齱齵齾厐龑龒龚龖龘龝龡龢龤'
traditional_characters = '制咖片型超聲盤鑒定仔點他命書歌粉巾字帳恤手指記憶棒形轉彎溝光○〇㐄㐅㐆㐌㐖毒㐜㐡㐤㐰㐺㑇㑳㒳㒸㔾㗂㗎㝵㞎㞙㞞㠯㢲㢴㤅㥁㥯㨗㫺㬎㮎㮚㮸㲋㲱㲾㳮㵎㵪㶸㷖㷭㹢㹴犬㺢狓㺵㼝㽮㿝䍃䔢䖟䖸䗈䗥䗪䝓䠶䥯䦉䯝䰾魚䲔䳗䳘䵹鼄䶑一對應映射丁不識下兒子做二休世丘之貉並中台原則串為甚謂乾淨了百事無成八變五十些人得道雞升天代如併來去個國政策勁幽靈在歐洲遊蕩接樣蘿蔔坑側化傳價元論醇共再准刀兩斷切分耕耘收穫錢貨物向看舊就緒險刻千金動勞永逸匙零夜半卡通回復返影蹤反常態口咬氣句話同吐快吹周味呼諾嗚品紅鍋哄而散起唱和問三知生熟團漆黑火糟堆場空塊麵塌糊塗塵染壁廂夔已足多情露水大早到晚夫妻當關萬莫開失古恨套所料既往孔見提師要家主審寸陰難買鬥牛小撮部陣局展身層巴掌帆風順席地帶過年計於春頭載四季期被蛇怕井繩度願式份彈頃深前律徑心意念差愁孤行俱全房廳交遮打技長把抓死拿眼淚鼻涕鑰鎖折段抿拍即合掃排掬揮撥擁上入擊洞擲攬改故轍敗文值名斑方面旁族日秋餐隔雅里終父旦時晌會霎間晃暴寒曝更月望垠際朝夕本正經利杯羹東西板枝獨秀根筋桿進條龍服務概模次函數又性程總付步腳印趨登毛拔呵氧氮碳決雌雄波未平派謊言流清楚白準溜煙潭有獲聞是處降琴鶴甲病發可拾沙目然瞭直以相眨穿睹瞥瞬矢的解石鳥神教秉虔誠秘種窩蜂窮竅笑置筆苟勾銷抹殺煞等獎箍節吃箭仇雙鵰詩籌籮筐系列紙級士官統絲毫掛維網盡線微吭響股腦胎脈承腔臂力致效資源址器舉功投般說講規貿易葉障著慎滿皆輸號木電池衣傾鐘高低視仁覺醒覽遺角銀幣觸潰九鼎蔽抄出駟馬追重語破貧洗貫走路安蹴至幾蹶振躍役膽汗較輩輪辭贊退六連遍遞邊針血錘音錯門思閃真倒項栽霧類保護川先驚乍體鬨鱗爪鳴滴泡鄰域黨專鼓作齊炒丑烯亥克內酯冬加奴卯肝炎基尺梁街褲鎬客寵庭巳汝昌烷玲磊糖肇酉醛啷青縣韙良香骨鯛丂七集河市弦喜嘴張舌堵區工業姊妹星架構巧彩扭歪拼湊餘熱曜武州爺浮屠美鄉老階樹葷素碎落能魄鰓鰻珠丄丅丆万俟丈尚摸母娘量管群亞虎必我堂令申件裝伏位博俠義界表女墟臺戲臭皮匠勝諸葛亮賽頂倍催請運算包立叉戟離疫苗土史志演圍揭瓦曬夷姑婆帝村寶爛尖杉鹼屜桌山岔島由紀峽壩庫鎮廢從德後拗湯治旬食明昧曹朋友框欄極權冪曲歸依貓民氟硼氯磷鐵江侗自旅法司洋浦梅園溫暖灣焦班幸用田略番疊皇炮捶硝苯酸腺苷稜草鏡穗跳遠索錦綱聚氰胺聯店胚膲愛色堇紫羅蘭芝茶飯菱雲蟲藏藩亂叛蘇親債凳學座恐戀柱測肌腹衩錐係貂企烏跪叩軍車農題迭都甘油屯奏鍵短阿姨陪姐隻顧茅廬槽駕魂鮮鹿頁其菜單乘任供勢午齒漢組織吊調瀉唇坡城報墳外夸將尉建築岸崗公床揚新劍昇杭林栗校樓標款汽社浣海商館劇院鋼華港機械廣媒環球融第醫科證券綜財樂育游漲猶嶺疏癮瞼確兵領導繳肢膛船艾瑟爾蒼蔡虞傚衫覆訪訴課諭議軌述野鉤限敵鞋頜頷顎饒首齦站例修凡劃垂屆屬崽頦廚拜挫擺放旋削棋榻檻禮沉注滑營獄畫确儀聘花葬詔員跌轄週達酒錨閘陷陸雨雪飛威丌于丹久乏予理評產亢卑亦乎舞己悲矩圓詞害誌但住佞佳便俗信票案幅翁倦倫假偏倚斜虧鬼敲停備傷脾胃僅此像儉匱免宜穴焉戴兼容許凍伯仲負彼晝皂軒輊實刊划顛衛戰哥比省非好黃飾別拘束掩奶睬選擇搖擾煩苦枚寫協厭及格受歡迎約只估侵犯割狀告或缺抗拒挽撤救藥喻磨滅端倪少逆逾越避靠適吉譽吝玉含延咎歹聽啻淵善謀均勻堪忍夠太惹妙妥妨孕症孝術室完納推冠積宣疑辯慄碴稱屈撓屑干涉衡待很忙惡忿怎麼怠急恥恭息悅惑惜惟想愉愧怍慌憤啟懂懈懷材才緊招認扣抵拉捨也罷插揣冒搭撞南牆擴核支攻敢雷攀敬裡嗎需景智暇曾罪遇朽枉止況競爭辱求癒渝溶濟左右袒困補爽特寂寞示弱找謝畏強疾徐痛癢冤符眠睦瞅董何厚云措活疲羞者輕玻璃祥兆禁移稂莠穩佛換答簡結果盟絕縷途給談否羈翼耐肖脛毋寧興舒若菲萊痕跡窠臼虛衰臉兔撒鷹棺範該詳諱抬泰讓鬚眉象眾貲賬費灰賴奇慮訓輟辨菽麥辛近送透逞徒速續逮捕遂遑違遜斧鉞艱醉鏽隨觀棄顯飽脂肪使丏丐幫丒且慢末丕替桃宗王尊涼爵各圖屋脊糧署錄壇吾祿職胄襲君廈丗北壑桐疹損逢陵鷸丙寅戌氨腈唑綸辰酮脫氫酶醚丞丟現掉紗帽弄扯砲碗丠両丣坐存激肩臻蒂蓮悖序驅丨丩丫挺杈髻鬟細介俄伊犁京尼布訂普渡央委監察檢查劑圈設警隊斯督剩震境航舶革防托播促質版蠑螈鋒研藝歷殘消頻譜精密製造陲郵候埔堅壓壢凹匯執府究邦俘攝寮彬狼嶽肺腫庸英訊診埋粒胞括控碼韓暑槍樞砥澳哇牟壽甸鑽探篇簽綴縫繼耳肯照婦埃懸璧軸櫃檯辣擱淺邪跑纖阮陽私囊魔丮丰姿采丱燒丳丵丶丷丸參寨朗桂瑞砂衷霞貌鳳僕艦因嫌宰峰幹絡牌持旨祭禱簿編罰賓辦丼丿乀乂乃乄仰慕盛曠留考驗闊乆乇么醜麼乊湖燃乑乒乓乕乖僻忤戾离謬迕乗危肥劫除隙浪婿乙炔腸酰吡咯鹽乚乛乜嘢卿玄宮尾狐龜塔嶷兄弟泉章霄釘耙乞扎哀憐恕討乢乣乤乥乧乨乩童乪乫乭乳暈汁液瑤漿牙癌突竇罩腐膠豬酪蛋糕菌瘤乴乵乶乷乸乹乺乼乾俸冰嘉噦嚎坤媽屍壘旱枯涸俐渴潮澀煸豆燥爹瘦癟癬瞪袋脆薑貝隆餾乿亀亁叫咕攘扔搞男砸竄蓬麻亃亄亅卻亇遲典今臨繁累卵奉婚聰躬巨與遷添裂副宿歲怪噁尕崙愣杆硅硫鈦鈾錳芑雜異鈉砷胂磺琥珀艙棍簧胡茬盜浩盆販郎腿亍洪亐互欠助勉惠操斥諉繫戶譯亓墓碑刑鈴卅渠繽紛斗米旗憲釩燈徽瘟祖拳福穀豐臟腑綁肉醃苓蘊橋鋪霸顏鬧判噴岡底蛙陘礦亖亙亜罕們娜桑那努哈喀弗烈曼松森杜氏盃奧琛敦戊穆聖裔彙薛孫亟亡佚虜羊牢奮釋卷卸契媾感額睫纏誼趾塞擠紐阻還配馳莊亨洛祚亪享津滬畿郊慈菴枇杷膏亭閣鋥麗亳亶亹誅初責翻瘋偶傑叢稠妖拖寰居吸授慧蝸吞壯魅狗矛盾益渣患憂稀描猿夢暫涯畜禍緣沸搜引擎臣橫紜誰混援蒸獸獅稅剖亻亼亽亾什獻剎邡麽仂仃仄仆富怨仈仉畢昔晨殼紹仍仏仒仕宦仗欺恃腰嘆歎炬梓訖施仙后瓊逝仚仝仞仟悔仡佬償填泊拓撲簇羔購頓欽佩髮棻閫馭養億儆尤藉幀賑凌敘帖李柔剛沃眥睚戒訛取饗讀仨仫仮著泳臥躺韶夏裁仳仵唯賢憑釣誕仿似宋彿諷伀碩盼鵝伄儅伈伉儷柯始娃邁戈坦堡帕茨薩廟瑪莉莎藤霍姆伋伍奢胥廷芳豪伎倆侍汛勒希羲雛伐憩整謨閑閒伕伙伴頤伜伝伢叔恆茲恩翰伱伲侶伶俜悧鼬伸懶縮喇叭伹伺伻伽倻輻伾佀佃佇佈喬妮墨佉盧佌貸劣廉昂檔濃矮傘窪緩耗胸谷迷擋率齲宅沫舍療佐貳佑佔優據鏵嘗呢須魯曉佗佘余坪寺瓜銃僧蒙芒陀龕哼嘔坊姦孽弊揖祟繭縛誓賊佝僂瞀佟你奪趕佡佢佣佤佧賈佪佫佯佰佱潔績釀餚佴捲佶佷佸佹佺佻佼佽佾具喚窘壞娛怒慨硬習慣聾膨脹蔓駭貴痺侀侁侂侃侄侅鴻燕侇侈糜靡侉侌妾侏儒倉鼠侐侑侔侖侘侚鏈侜偎傍鈷循柳葫蘆附価侮罵蔑侯岩截蝕侷貼壺嬛宴捷攜桶箋酌俁狹膝狄俅俉俊俏俎俑俓俔諺俚俛黎健呈固墒增守康箱濕祐鏢鑣槓盒靖膜齡俞豹獵噪孚封札筒託衍鴿剪撰稿煉廠禊練繕葺俯瞰撐衝俲俳俴俵俶俷俺俻俾倀倂倅儲卒惶敷猝逃頡蓄崇隱倌倏忽刺蠟燭噍嚼坍扁抽斃蔥楣灌灶糞背藪賣賠閉霉騰倓倔倖倘倜儻倝借箸挹澆閱倡狂倢倣値倥傯倨��倩匡嗣沖柝珍倬倭寇猩倮倶倷倹勤讚偁偃充偽吏嗓寐惺扮拱芫茜藉虢鈔偈偉晶偌宕距析濾殿疼癱註頗偓偕鴨歇滯偝偟偢忘怡旺偨偩偪偫偭偯偰偱偲偵緝蹄偷減惰漏窺竊偸偺迹傀儡傅傈僳傌籬傎奎琳迪叟芭傒傔傕傖悉荒傜傞傢傣芽逼傭婢傮睨寄檄誦謠頌傴擔辜弓慘蒿悼疤傺傻屄臆巢洩篋羨蓋軋頹傿儸僄僇僉僊働僎僑僔僖僚僝僞僣僤僥僦猴僨僩僬僭僮僯僰僱僵殖籤靜僾僿征隴儁儂儃儇儈朴薄儊儋儌儍儐儓儔儕儗儘儜儞儤儦儩汰哉寡渥裕酷儭儱罐儳儵儹儺儼儽兀臬臲鷲允勛勳宙宵帥憝彞諧嫂鬩暢沛溢盈飢赫兇悍狠猛頑愚妣斬秦遣鞭耀敏榮槃澤爆碟磁禿纜輝霽鹵朵婁孜烽醬勃汀箕裘鉗耶懞蕾徹兌軟遭黜兎児韻媳爸兕觥兗兙兛兜售鍪肚兝兞兟兡兢兣樽殮涅睡稟籍贅泌啡肽奸幕涵澇熵疚眷稃襯訌赴煥椒殲植跏沒試誤猜棲窗肋袖頰兪卦撇鬍岐廓轎疸楓茴瓏廁秩募勺噸寓斤曆畝迫筷釐最淫螺韜兮寬匪篩襄贏軛複兲詐刃堰戎痞蟻餉它冀鑄冂冃円冇冉冊嫁厲礪竭醮冏牧冑冓冔冕冖冗冘冞冢窄抑誣冥冫烘菇蟄冷凝坨橇淇淋炭餅磚磧窖醋雕雹霜冱冶爐艷嘲峻灘淡漠煖颼飲冼冽凃凄愴梗凅凇凈凊凋敝濛凔凜遵汞脢凞几凢処凰凱凵凶焰凸摺刷紋預喪嘍奔巡榜殯芙蓉租籠輯鞘萃凼鋸鑊刁蠻刂娩崩批拆攤掰櫱驟歧顆秒袂贓勿囑忌磋琢膚刈羽刎訟戮舂槳艇刓刖霹靂刜創犢刡恙墅幟筵緻刦刧刨昏默攸尿慾薰潤薰圭刪刮痧鏟刱刲刳刴刵踏磅戳柏槐繡芹莧蝟舟銘鵠鶩刼剁剃辮剄剉履鉛剋剌姻咽哨廊掠桅沿召瞻翅趙卜渺茫郭剒剔剕瀝剚愎毅訥纔剜剝啄採剞剟剡剣剤綵剮腎駛黏剰袍剴紊剷剸剺剽剿劁劂劄劈啪柴扳啦劉奭姥夼昫涓熙禪禹錫翔雁鶚劊劌弩柄蜻蛉劒劓劖劘劙瀾簣賞磯釜晉甜薪逐劦熔紂虐赤囚劬劭労劵効劻劼劾峭艮勅勇勵勍勐臘脖龐漫飼盪粥輒勖勗勘驕餒碌泮雇捐竹騎殊阱勣樸懇謹勦勧勩勯勰勱勲勷勸懲慰誡諫勹芡踐闌匁庇拯粟紮袱裹餃匆遽匈匉匊匋匍匐莖匏匕妝痰膿蛹齋苑烤蹈塘羌熊閥螳螂疆碚竿緯荷茵邙魏匚匜匝匟扶稷匣匭攏匸匹耦匽匾匿卂叮瘡禧軫堤棚迢鈞鍊卄卆遐卉瓷盲瓶噹胱腱裸卋卌卍卐怯污賤鄙齷齪陋卓溪唐梯漁陳棗泥漳潯澗梨芬譙贍轅迦鄭単驢弈洽鰲卛占筮卝卞卟吩啉屎翠厄卣卨卪卬卮榫襖璽綬鈕蚤懼殆篤聳卲帘帙繞卹卼卽厂厎厓厔厖厗奚厘厙厜厝諒厠厤厥厪膩孢厮厰厳厴厹厺粕垢蕪菁厼厾叁悟茸薯叄吵笄悌哺譏
坫壟弧芯杠潛嬰芻袁詰貪諜煽饋駁収岳締災賄騙叚叡吻攔蘑蜜訣燧玩硯箏椎藺銅逗驪另覓叨嘮謁杵姓喊嚷囂咚嚀塑尋惱憎擦祇泣滲蝠叱吒咄咤喝籀黛舵舷叵叶鐸懿昭穰苴遼叻叼吁塹嫖賭瞧爬衆抒吅吆夥巹橡滌抱縱摩郡唁墜扇籃膀襪頸吋愾諮酬哭妓媛暗錶韁邇妃羿絮蕃渾拐葵暮隅吔吖啶嗪戚吜嗇噬嚥吟哦詠吠吧唧嗒咐吪雋咀徵燐苞茹鈣哧吮吰吱嘎吲哚吳棟嬌窟孟簫忠晗淞闔閭趼宇吶睛噓拂捧疵熄竽笛糠吼吽呀呂韋矇呃呆笨呇貢呉罄呋喃呎呏呔呠呡癡呣呤呦呧瑛眩扒晬淑姬瑜璇鵑呪呫嗶嚅囁呬呯呰呱呲咧噌鈍呴呶呷呸呺呻哱咻嘯嚕籲坎坷邏呿咁咂咆哮咇咈咋蟹煦珅藹咍咑咒詛咔噠嚓咾噥哩喱咗咠咡咢咣咥咦咨嗟詢咩咪咫嚙齧咭咮咱咲咳嗆嗽咴咷咸咹咺咼喉咿婉慟憫賦矜綠茗藍哂搶瞞哆嗦囉噻啾濱彗哋哌哎唷喲哏哐哞哢哤哪裏哫啼喘哰哲萎蚌哳哶哽哿唄唅唆唈唉唎唏嘩堯棣殤璜睿肅唔睇唕唚唞唣喳唪唬唰喏唲唳唵嘛唶唸唹唻唼唾唿啁啃鸚鵡啅埠棧榷祺舖鞅飆啊啍啎啐啓啕啖啗啜啞祈啢啣啤啥啫啱啲啵啺饑啽噶崑沁喁喂喆裙喈嚨喋喌喎喑喒喓喔粗喙幛慶滋鵲喟喣喤喥喦喧騷喨喩梆喫葡萄喭駝挑嚇碰樅瓣純皰藻趟鉻喵営喹喺喼喿嗀嗃嗄嗅嗈嗉嗊嗍嗐嗑嗔詬嗕嗖嗙嗛嗜痂癖嗝嗡嗤嗥嗨嗩嗬嗯嗰嗲嗵嘰嗷嗹嗾嗿嘀嘁嘂嘅惋嘈峪禾蔭嘊嘌嘏嘐嘒嘓嘖嘚嘜嘞嘟囔嘣嘥嘦嘧嘬嘭這謔嚴敞饞鬆嘵嘶嘷嘸蝦嘹嘻嘽嘿噀噂噅噇噉噎噏噔噗噘噙噚噝噞噢噤蟬皿噩噫噭噯噱噲噳嚏涌灑欲巫霏噷噼嚃嚄嚆抖嚌嚐嚔囌嚚嚜嚞嚟嚦嚬嚭嚮嚯嚲嚳飭按竣苛嚵嚶囀囅囈膪謙囍囒囓囗囘蕭酚飄濺諦囝溯眸紇鑾鶻囟殉囡団囤囥囧囨囪囫圇囬囮囯囲図囶囷囸囹圄圉擬囻囿圀圂圃圊粹蠹赦圌墾圏滾鯡鑿枘圕圛圜圞坯埂壤骸炕祠窯豚紳魠鯪鱉圧握圩圪垯圬圮圯炸岬幔毯祇窨菩溉圳圴圻圾坂坆沾坋坌舛壈昆墊墩椅坒坓坩堝坭坰坱坳坴坵坻坼楊掙涎簾垃垈垌垍垓垔垕垗垚垛垝垣垞垟垤垧垮垵垺垾垿埀畔埄埆埇埈埌殃隍埏埒埕埗埜埡埤埦埧埭埯埰埲埳埴埵埶紼埸培怖樁礎輔埼埽堀訶姪廡堃堄摧磐貞韌砌堈堉堊堋堌堍堎堖堙堞堠礁堧堨輿堭堮蜓摘堲堳堽堿塁塄塈煤塋棵塍塏塒塓綢���鴉沽虱塙塚塝繆塡塢塤塥塩塬塱塲蟎塼塽塾塿墀墁墈墉墐夯増毀墝墠墦漬缽墫墬墮墰墺墻櫥壅壆壊壌壎壒榨蒜壔壕壖壙壚壜壝壠壡壬壭壱売壴壹壻壼寢壿夂夅夆変夊夌漱邑夓腕泄甥禦骼夗夘夙袞瑙妊娠醣梟珊鶯鷺戧幻魘夤蹀祕擂鶇姚宛閨嶼庾撻拇賛蛤裨菠氅漓撈湄蚊霆鯊箐篆篷荊肆舅荔鮃巷慚骰辟邱鎔鐮阪漂燴鯢鰈鱷鴇臚鵬妒峨譚枰晏璣癸祝秤竺牡籟恢罡螻蠍賜絨御梭夬夭砣榆怙枕夶夾餡奄崛葩譎奈賀祀贈奌奐奓奕訢詝奘奜奠奡奣陶奨奩魁奫奬奰媧孩貶隸酥宄狡猾她奼嫣妁氈荼皋膻蠅嬪妄妍嫉媚嬈妗趣妚妞妤礙妬婭妯娌妲妳妵妺姁姅姉姍姒姘姙姜姝姞姣姤姧姫姮娥姱姸姺姽婀娀誘懾脅娉婷娑娓娟娣娭娯娵娶娸娼婊婐婕婞婤婥谿孺婧婪婬婹婺婼婽媁媄媊媕媞媟媠媢媬媮媯媲媵媸媺媻媼眯媿嫄嫈嫋嫏嫕嫗嫘嫚嫜嫠嫡嫦嫩嫪毐嫫嫬嫰嫵嫺嫻嫽嫿嬀嬃嬅嬉耍嬋痴豔嬔嬖嬗嬙嬝嬡嬢嬤嬦嬬嬭幼嬲嬴嬸嬹嬾嬿孀孃孅孌孏曰癲屏孑孓雀孖斟簍謎摺孛矻鳩崮軻祜鸞孥邈毓棠臏孬孭孰孱孳孵泛罔銜孻孿宀宁宂拙株薇掣撫琪瓿榴謐彌宊濂祁瑕宍宏碁宓邸讞実潢町宥宧宨宬徵崎駿掖闕臊煮禽蠶宸豫寀寁寥寃簷庶寎暄磣寔寖寘寙寛寠苫寤肘洱濫蒗陝覈寪弘綽螽寳擅疙瘩晷対檐専尃尅贖絀繚疇釁尌峙醌襟痲碧屁昊槌淘恵瀑牝畑莓缸羚覷蔻髒躁尒尓銳尗尙尜尟尢尥尨尪尬尭尰擒尲尶尷尸尹潽蠖蛾尻釦梢蚴鰭脬蹲屇屌蚵屐屓挪屖屘屙屛屝屢屣巒嶂巖舄屧屨屩屪屭屮戍駐鉀崖嵛巔旮旯楂欖櫸芋茱萸靛麓屴屹屺屼岀岊岌岍阜岑彭鞏岒岝岢嵐岣岧岨岫岱岵岷峁峇峋峒峓峞峠嵋峩峯峱峴峹峿崀崁崆禎崋崌崍嶇崐崒崔嵬巍螢顥崚崞崟崠崢巆崤崦崧殂崬崱崳崴崶崿嵂嵇嵊泗嵌嵎嵒嵓嵗嵙嵞嵡嵩嵫嵯嵴嵼嵾嶁嶃嶄晴嶋嶌嶒嶓嶔嶗嶙嶝嶞嶠嶡嶢嶧嶨嶭嶮嶰嶲嶴嶸巂巃巇巉巋巌巓巘巛滇芎巟巠弋迴巣巤炊擘蜥蟒蠱覡巰蜀彥淖杏茂甫楞巻巽幗巿帛斐鯽蕊帑帔帗帚琉汶帟帡帣帨帬帯帰帷帹暆幃幄幇幋幌幏幘幙幚幞幠幡幢幦幨幩幪幬幭幯幰遙蹉跎餘庚鑑幵幷稚邃庀庁広庄庈庉笠庋跋庖犧庠庤庥鯨庬庱庳庴庵馨衢庹庿廃廄廆廋廌廎廏廐廑廒廕廖廛廝搏鑼廞弛袤廥廧廨廩廱綿踵髓廸廹甌鄴廻廼廾廿躔弁皺弇弌弍弎弐弒弔詭憾薦弝弢弣弤弨弭弮弰弳霖繇燾斌旭溥騫弶弸弼弾彀彄彆纍糾彊彔彖彘彟彠陌彤貽彧繪虹彪炳彫蔚鷗彰癉彲彳彴彷彷徉徨彸彽踩斂旆徂徇徊渭畬鉉裼従筌徘徙徜徠膳甦萌漸徬徭醺徯徳徴潘徻徼忀瘁胖燎怦悸顫扉犀澎湃砰恍惚絞隘忉憚挨餓忐忑忒忖応忝忞耿忡忪忭忮忱忸怩忻悠懣怏遏怔怗怚怛怞懟黍訝怫怭懦怱怲怳怵惕怸怹恁恂恇恉恌恏恒恓恔恘恚恛恝恞恟恠恣恧眄恪恫恬澹恰恿悀悁悃悄悆悊悐悒晦悚悛悜悝悤您悩悪悮悰悱悽惻悳悴悵惘悶悻悾惄愫鍾蒐惆惇惌惎惏惓惔惙惛耄惝瘧濁惥惦惪惲惴惷惸拈愀愃愆愈愊愍愐愑愒愓愔愕愙氓蠢騃昵愜赧愨愬愮愯愷愼慁慂慅慆慇靄慉慊慍慝慥慪慫慬慱慳慴慵慷慼焚憀灼鬱憃憊憋憍眺捏軾憒憔憖憙憧憬憨憪憭憮憯憷憸憹憺懃懅懆邀懊懋懌懍懐懞懠懤懥懨懫懮懰懱毖懵遁樑雍懺懽戁戄戇戉戔戕戛戝戞戠戡戢戣戤戥戦戩戭戯轟戱披菊牖戸戹戺戻戼戽鍬扂楔扃扆扈扊杖牽絹銬鐲賚扐摟攪烊盹瞌跟躉鑔靶鼾払扗玫腮扛扞扠扡扢盔押扤扦扱罾揄綏鞍郤窾扻扼扽抃抆抈抉抌抏瞎抔繯縊擻抜抝択抨摔歉躥牾抶抻搐泵菸拃拄拊髀拋拌脯拎拏拑擢秧沓曳攣迂拚拝拠拡拫拭拮踢拴拶拷攢拽掇芥橐簪摹疔挈瓢驥捺蹻挌挍挎挐揀挓挖掘浚挙揍聵挲挶挾挿捂捃捄捅捆捉捋胳膊揎捌捍捎軀蛛捗捘捙捜捥捩捫捭据捱捻捼捽掀掂掄臀膘掊掎掏掐笙掔掗掞棉芍掤搪闡掫掮掯揉掱掲掽掾揃揅揆搓揌諢揕揗揘揜揝揞揠揥揩揪揫櫫遒麈揰揲揵揶揸揹揺搆搉搊搋搌搎搔搕撼櫓搗搘搠搡搢搣搤搥搦搧搨搬楦褳訕赸搯搰搲搳搴搵搷搽搾搿摀摁摂摃摎摑摒摓跤摙摛摜摞摠摦睺羯摭摮摯摰摲摳摴摶摷摻摽撂撃撅稻撊撋撏鐧潑撕撙撚撝撟撢撣撦撧撩撬撱朔撳蚍蜉撾撿擀擄闖擉缶觚擐擕擖擗擡擣擤澡腚擧擨擩擫擭擯擰擷擸擼擽擿攃攄攆攉攥攐攓攖攙攛每攩攫轡澄攮攰攲攴軼攷砭訐攽碘敁敃敇敉敍敎筏敔敕敖閏誨敜煌敧敪敱敹敺敻敿斁衽斄牒縐謅斉斎斕鶉讕駮鱧斒筲斛斝斞斠斡斢斨斫斮晾沂潟穎絳邵斲斸釳於琅斾斿旀旂旃旄渦旌旎旐旒旓旖旛旝旟旡旣浴旰獺魃旴旹旻旼旽昀昃昄昇昉晰躲澈熹皎皓礬昑昕昜昝昞昡昤暉筍昦昨昰昱昳昴昶昺昻晁蹇隧蔬髦晄晅晒晛晜晞晟晡晢晤晥曦晩萘瑩顗晿暁暋暌暍暐暔暕煅暘暝暠暡曚暦暨暪朦朧暱暲殄馮暵暸暹暻暾曀曄曇曈曌曏曐曖曘曙曛曡曨曩駱曱甴肱曷牘禺錕曽滄耽朁朅朆杪栓誇竟粘絛朊膺朏朐朓朕朘朙瞄覲溘饔飧朠朢朣柵椆澱蝨朩朮朰朱炆璋鈺熾鹮朳槿朶朾朿杅杇杌隉欣釗湛漼楷瀍煜玟纓翱肈舜贄适逵杓杕杗杙荀蘅杝杞脩珓筊杰榔狍閦顰緬莞杲杳眇杴杶杸杻杼枋枌枒枓衾葄翹紓逋枙狸椏枟槁枲枳枴枵枷枸櫞枹枻柁柂柃柅柈柊柎某柑橘柒柘柙柚柜柞櫟柟柢柣柤柩柬柮柰柲橙柶柷柸柺査柿栃栄栒栔栘栝栟栢栩栫栭栱栲栳栴檀栵栻桀驁桁鎂桄桉桋桎梏椹葚桓桔桕桜桟桫欏桭桮桯桲桴桷桹湘溟梃梊梍梐潼梔梘梜梠梡梣梧梩梱梲梳梴梵梹棁棃櫻棐棑棕櫚簑繃蓑棖棘棜棨棩棪棫棬棯棰棱棳棸棹槨棼椀椄苕椈椊椋椌椐椑椓椗検椤椪椰椳椴椵椷椸椽椿楀楄楅篪楋楍楎楗楘楙楛楝楟楠楢楥楨楩楪楫楬楮楯楰楳楸楹楻楽榀榃榊榎槺榕榖榘榛狉莽榜笞榠榡榤榥榦榧榪榭榰榱槤霰榼榾榿槊閂槎槑槔槖様槜槢槥槧槪槭槮槱槲槻槼槾樆樊樏樑樕樗樘樛樟樠樧樨権樲樴樵猢猻樺樻罍樾樿橁橄橆橈笥龠橕橚橛輛橢橤橧豎膈跨橾橿檁檃檇檉檍檎檑檖檗檜檟檠檣檨檫檬檮檳檴檵檸櫂櫆櫌櫛櫜櫝櫡櫧櫨櫪櫬櫳櫹櫺茄櫽欀欂欃欐欑欒欙欞溴欨欬欱欵欶欷歔欸欹欻欼欿歁歃歆艎歈歊蒔蝶歓歕歘歙歛歜歟歠蹦詮鑲蹣跚陞陟歩歮歯歰歳歴璞歺瞑歾歿殀殈殍殑殗殜殙殛殞殢殣殥殪殫殭殰殳荃殷殸殹蛟殻殽謗毆毈毉餵毎毑蕈毗毘毚茛鄧毧毬毳毷毹毽毾毿氂氄氆靴氉氊氌氍氐聊氕氖気氘氙氚氛氜氝氡洶焊痙氤氳氥氦鋁鋅氪烴氬銨痤汪滸漉痘盂碾菖蒲蕹蛭螅氵氷氹氺氽燙氾氿渚汆汊汋汍汎汏汐汔汕褟汙汚汜蘺沼穢衊汧汨汩汭汲汳汴隄汾沄沅沆瀣沇沈葆浸淪湎溺痼痾沌沍沏沐沔沕沘浜畹礫沚沢沬沭沮沰沱灢沴沷籽沺烹濡洄泂肛泅泆湧肓泐泑泒泓泔泖泙泚泜泝泠漩饃濤粼濘蘚鰍泩泫泭泯銖泱泲洇洊涇琵琶荽薊箔洌洎洏洑潄濯洙洚洟洢洣洧洨洩痢滔洫洮洳洴洵洸洹洺洼洿淌蜚浄浉浙贛渫浠浡浤浥淼瀚浬浭翩萍浯浰蜃淀苔蛞蝓蜇螵蛸煲鯉浹浼浽溦涂涊涐涑涒涔滂涖涘涙涪涫涬涮涴涶涷涿淄淅淆淊淒黯淓淙漣淜淝淟淠淢淤淥淦淩猥藿褻淬淮淯淰淳詣淶紡淸淹燉癯綺渇済渉渋渓渕渙渟渢滓渤澥渧渨渮渰渲渶渼湅湉湋湍湑湓湔黔湜湝湞湟湢湣湩湫湮麟湱湲湴湼満溈溍溎溏溛舐漭溠溤溧馴溮溱溲溳溵溷溻溼溽溾滁滃滉滊滎滏稽滕滘滙滝滫滮羼耷滷滹滻煎漈漊漎繹漕漖漘漙漚漜漪漾漥漦漯漰漵漶漷濞潀潁潎潏潕潗潚潝潞潠潦祉瘍潲潵潷潸潺潾潿澁澂澃澉澌澍澐澒澔澙澠澣澦澧澨澫澬澮澰澴澶澼熏郁濆濇濈濉濊貊濔疣濜濠濩觴濬濮盥濰濲濼瀁瀅瀆瀋瀌瀏瀒瀔瀕瀘瀛瀟瀠瀡瀦瀧瀨瀬瀰瀲瀳瀵瀹瀺瀼灃灄灉灋灒灕灖灝灞灠灤灥灨灩灪蜴灮燼獴灴灸灺炁炅魷炗炘炙炤炫疽烙釺炯炰炱炲炴炷燬炻烀烋瘴鯧烓烔焙烜烝烳飪烺
焃焄耆焌焐焓焗焜焞焠焢焮焯焱焼煁煃煆煇煊熠煍熬煐煒煕煗燻礆霾煚煝煟煠煢矸煨瑣煬萁煳煺煻熀熅熇熉羆熒穹熗熘熛熜稔諳爍熤熨熯熰眶螞熲熳熸熿燀燁燂燄盞燊燋燏燔隼燖燜燠燡燦燨燮燹燻燽燿爇爊爓爚爝爟爨蟾爯爰爲爻爿爿牀牁牂牄牋牎牏牓牕釉牚腩蒡虻牠雖蠣牣牤牮牯牲牳牴牷牸牼絆牿靬犂犄犆犇犉犍犎犒犖犗犛犟犠犨犩犪犮犰狳犴犵犺狁甩狃狆狎狒獾狘狙黠狨狩狫狴狷狺狻豕狽蜘猁猇猈猊猋猓猖獗猗猘猙獰獁猞猟獕猭猱猲猳猷猸猹猺玃獀獃獉獍獏獐獒獘獙獚獜獝獞獠獢獣獧鼇蹊獪獫獬豸獮獯鬻獳獷獼玀玁菟玅玆玈珉糝禛郅玍玎玓瓅玔玕玖玗玘玞玠玡玢玤玥玦玨瑰玭玳瑁玶玷玹玼珂珇珈瑚珌饈饌珔珖珙珛珞珡珣珥珧珩珪珮珶珷珺珽琀琁隕琊琇琖琚琠琤琦琨琫琬琭琮琯琰琱琲瑯琹琺琿瑀瑂瑄瑉瑋瑑瑔瑗瑢瑭瑱瑲瑳瑽瑾瑿璀璨璁璅璆璈璉璊璐璘璚璝璟璠璡璥璦璩璪璫璯璲璵璸璺璿瓀瓔瓖瓘瓚瓛臍瓞瓠瓤瓧瓩瓮瓰瓱瓴瓸瓻瓼甀甁甃甄甇甋甍甎甏甑甒甓甔甕甖甗飴蔗甙詫鉅粱盎銹糰甡褥産甪甬甭甮甯鎧甹甽甾甿畀畁畇畈畊畋畎畓畚畛畟鄂畤畦畧荻畯畳畵畷畸畽畾疃疉疋疍疎簞疐疒疕疘疝疢疥疧疳疶疿痁痄痊痌痍痏痐痒痔痗瘢痚痠痡痣痦痩痭痯痱痳痵痻痿瘀瘂瘃瘈瘉瘊瘌瘏瘐瘓瘕瘖瘙瘚瘛瘲瘜瘝瘞瘠瘥瘨瘭瘮瘯瘰癧瘳癘瘵瘸瘺瘻瘼癃癆癇癈癎癐癔癙癜癠癤癥癩蟆癪癭癰発踔紺蔫酵皙砬砒翎翳蘞鎢鑞皚鵯駒鱀粵褶皀皁莢皃鎛皈皌皐皒硃皕皖皘皜皝皞皤皦皨皪皫皭糙綻皴皸皻皽盅盋盌盍盚盝踞盦盩鞦韆盬盭眦睜瞤盯盱眙裰盵盻睞眂眅眈眊県眑眕眚眛眞眢眣眭眳眴眵眹瞓眽郛睃睅睆睊睍睎睏睒睖睙睟睠睢睥睪睪睯睽睾瞇瞈瞋瞍逛瞏瞕瞖瞘瞜瞟瞠瞢瞫瞭瞳瞵瞷瞹瞽闍瞿矓矉矍鑠矔矗矙矚矞矟矠矣矧矬矯矰矱硪碇磙��舫阡、矼矽礓砃砅砆砉砍砑砕砝砟砠砢砦砧砩砫砮砳艏砵砹砼硇硌硍硎硏硐硒硜硤硨磲茚鋇硭硻硾碃碉碏碣碓碔碞碡碪碫碬碭碯碲碸碻礡磈磉磎磑磔磕磖磛磟磠磡磤磥蹭磪磬磴磵磹磻磽礀礄礅礌礐礚礜礞礤礧礮礱礲礵礽礿祂祄祅祆禳祊祍祏祓祔祕祗祘祛祧祫祲祻祼餌臠錮禂禇禋禑禔禕隋禖禘禚禜禝禠禡禢禤禥禨禫禰禴禸稈秈秊闈颯秌秏秕笈蘵賃秠秣秪秫秬秭秷秸稊稌稍稑稗稙稛稞稬稭稲稹稼顙稾穂穄穇穈穉穋穌貯穏穜穟穠穡穣穤穧穨穭穮穵穸窿闃窀窂窅窆窈窕窊窋窌窒窓窔窞窣窬黷蹙窰窳窴窵窶窸窻竁竃竈竑竜竝竦竪篦篾笆鮫竾笉笊笎笏笐靨笓笤籙笪笫笭笮笰笱笲笳笵笸笻筀筅筇筈筎筑筘筠筤筥筦筧筩筭筯筰筱筳筴讌筸箂箇箊箎箑箒箘箙箛箜篌箝箠箬鏃箯箴箾篁篔簹篘篙篚篛篜篝篟篠篡篢篥篧篨篭篰篲篳篴篶篹篼簀簁簃簆簉簋簌簏簜簟簠簥簦簨簬簰簸簻籊籐籒籓籔籖籚籛籜籣籥籧籩籪籫籯芾麴籵籸籹籼粁粃粋粑粔糲粛粞粢粧粨粲粳粺粻粽闢粿糅糆糈糌糍糒糔萼糗蛆蹋糢糨糬糭糯糱糴糶糸糺紃蹼鰹黴紆紈絝紉閩襻紑紕紘錠鳶鷂紝紞紟紥紩紬紱紲紵紽紾紿絁絃絅経絍絎絏縭褵絓絖絘絜絢絣螯絪絫聒絰絵絶絺絻絿綀綃綅綆綈綉綌綍綎綑綖綘継続緞綣綦綪綫綮綯綰罟蝽綷縩綹綾緁緄緅緆緇緋緌緎総緑緔緖緗緘緙緜緡緤緥緦纂緪緰緱緲緶緹縁縃縄縈縉縋縏縑縕縗縚縝縞縟縠縡縢縦縧縯縰騁縲縳縴縵縶縹縻衙縿繄繅繈繊繋繐繒繖繘繙繠繢繣繨繮繰繸繻繾纁纆纇纈纉纊纑纕纘纙纚纛缾罃罆罈罋罌罎罏罖罘罛罝罠罣罥罦罨罫罭鍰罳罶罹罻罽罿羂羃羇羋蕉51鴕羑羖羗羜羝羢羣羥羧羭羮羰羱羵羶羸藜鮐翀翃翄翊翌翏翕翛翟翡翣翥翦躚翪翫翬翮翯翺翽翾翿闆饕鴰鍁耋耇耎耏耑耒耜耔耞耡耤耨耩耪耬耰鬢耵聹聃聆聎聝聡聦聱聴聶聼閾聿肄肏肐肕腋肙肜肟肧胛肫肬肭肰肴肵肸肼胊胍胏胑胔胗胙胝胠銓胤胦胩胬胭胯胰胲胴胹胻胼胾脇脘脝脞脡脣脤脥脧脰脲脳腆腊腌臢腍腒腓腖腜腠腡腥腧腬腯踝蹬鐐腴腶蠕誹膂膃膆膇膋膔膕膗膙膟黐膣膦膫膰膴膵膷膾臃臄臇臈臌臐臑臓臕臖臙臛臝臞臧蓐詡臽臾臿舀舁鰟鮍舋舎舔舗舘舝舠舡舢舨舭舲舳舴舸舺艁艄艅艉艋艑艕艖艗艘艚艜艟艣艤艨艩艫艬艭荏艴艶艸艹艻艿芃芄芊萰陂藭芏芔芘芚蕙芟芣芤茉芧芨芩芪芮芰鰱芴芷芸蕘豢芼芿苄苒苘苙苜蓿苠苡苣蕒苤苧苪鎊苶苹苺苻苾茀茁范蠡萣茆茇茈茌茍茖茞茠茢茥茦菰茭茯茳藨茷藘茼荁荄荅荇荈菅蜢鴞荍荑荘荳荵荸薺莆莒莔莕莘莙莚莛莜莝莦莨菪莩莪莭莰莿菀菆菉菎菏菐菑菓菔菕菘菝菡菢菣菥蓂菧菫轂鎣菶菷菹醢菺菻菼菾萅萆萇萋萏萐萑萜萩萱萴萵萹萻葇葍葎葑葒葖葙葠葥葦葧葭葯葳葴葶葸葹葽蒄蒎蒓蘢薹蒞蒟蒻蒢蒦蒨蒭藁蒯蒱鉾蒴蒹蒺蒽蓀蓁蓆蓇蓊蓌蓍蓏蓓蓖蓧蓪蓫蓽跣藕蓯蓰蓱蓴蓷蓺蓼蔀蔂蔃蔆蔇蔉蔊蔋蔌蔎蔕蔘蔙蔞蔟鍔蔣雯蔦蔯蔳蔴蔵蔸蔾蕁蕆蕋蕍蕎蕐蕑蕓蕕蕖蕗蕝蕞蕠蕡蕢蕣蕤蕨蕳蕷蕸蕺蕻薀薁薃薅薆薈薉薌薏薐薔薖薘薙諤釵薜薠薢薤薧薨薫薬薳薶薷薸薽薾薿藄藇藋藎藐藙藚藟藦藳藴藶藷藾蘀蘁蘄蘋蘗蘘蘝蘤蘧蘩蘸蘼虀虆虍蟠虒虓虖虡虣虥虩虯虰蛵虵虷鱒虺虼蚆蚈蚋蚓蚔蚖蚘蚜蚡蚣蚧蚨蚩蚪蚯蚰蜒蚱蚳蚶蚹蚺蚻蚿蛀蛁蛄蛅蝮蛌蛍蛐蟮蛑蛓蛔蛘蛚蛜蛡蛣蜊蛩蛺蛻螫蜅蜆蜈蝣蜋蜍蜎蜑蠊蜛餞蜞蜣蜨蜩蜮蜱蜷蜺蜾蜿蝀蝃蝋蝌蝍蝎蝏蝗蝘蝙蝝鱝蝡蝤蝥蝯蝰蝱蝲蝴蝻螃蠏螄螉螋螒螓螗螘螙螚蟥螟螣螥螬螭螮螾螿蟀蟅蟈蟊蟋蟑蟓蟛蟜蟟蟢蟣蟨蟪蟭蟯蟳蟶蟷蟺蟿蠁蠂蠃蠆蠋蠐蠓蠔蠗蠙蠚蠛蠜蠧蠨蠩蠭蠮蠰蠲蠵蠸蠼蠽衁衂衄衇衈衉衋衎衒衕衖衚衞裳鈎衭衲衵衹衺衿袈裟袗袚袟袢袪袮袲袴袷袺袼褙袽裀裉裊裋裌裍裎裒裛裯裱裲裴裾褀褂褉褊褌褎褐褒褓褔褕褘褚褡褢褦褧褪褫褭褯褰褱襠褸褽褾襁襃襆襇襉襋襌襏襚襛襜襝襞襡襢襤襦襫襬襭襮襴襶襼襽襾覂覃覅覇覉覊覌覗覘覚覜覥覦覧覩覬覯覰観覿觔觕觖觜觽觝觡酲觩觫觭觱觳觶觷觼觾觿言賅訃訇訏訑訒詁託訧訬訳訹証訾詀詅詆譭詈詊詎詑詒詖詗詘詧詨詵詶詸詹詻詼詿誂誃誄鋤誆誋誑誒誖誙誚誥誧説読誯誶誾諂諄諆諌諍諏諑諕諗諛諝諞諟諠諡諴諵諶諼謄謆謇謌謍謏謑謖謚謡謦謪謫謳謷謼謾譁譅譆譈譊譌譒譔譖鑫譞譟譩譫譬譱譲譴譸譹譾讅讆讋讌讎讐讒讖讙讜讟谽豁豉豇豈豊豋豌豏豔豞豖豗豜豝豣豦豨豭豱豳豵豶豷豺豻貅貆貍貎貔貘貙貜貤饜貰餸貺賁賂賏賒賕賙賝賡賧賨賫鬭賮賵賸賺賻賾贇贉贐贔贕贗赬赭赱赳迄趁趂趄趐趑趒趔趡趦趫趮趯趲趴趵趷趹趺趿跁跂跅跆躓蹌跐跕跖跗跙跛跦跧跩跫跬跮跱跲跴跺跼跽踅踆踈踉踊踒���踘踜踟躇躕踠踡踣踤踥踦踧蹺踫踮踰踱踴踶踹踺踼踽躞蹁蹂躪蹎蹐蹓蹔蹕蹚蹜蹝蹟蹠蹡蹢躂蹧蹩蹪蹯鞠蹽躃躄躅躊躋躐躑躒躘躙躛躝躠躡躦躧躩躭躰躳躶軃軆輥軏軔軘軜軝齶転軥軨軭軱軲轆軷軹軺軽軿輀輂輦輅輇輈輓輗輙輜輞輠輤輬輭輮輳輴輵輶輹輼輾轀轇轏轑轒轔轕轖轗轘轙轝轞轢轤辠辢辤辵辶辺込辿迅迋迍麿迓迣迤邐迥迨迮迸迺迻迿逄逅逌逍逑逓逕逖逡逭逯逴逶逹遄遅遉遘遛遝遢遨遫遯遰遴遶遹遻邂邅邉邋邎邕邗邘邛邠邢邧邨邯鄲邰邲邳邴邶邷邽邾邿郃郄郇郈郔郕郗郙郚郜郝郞郟郠郢郪郫郯郰郲郳郴郷郹郾郿鄀鄄鄆鄇鄈鄋鄍鄎鄏鄐鄑鄒鄔鄕鄖鄗鄘鄚鄜鄞鄠鄢鄣鄤鄦鄩鄫鄬鄮鄯鄱鄶鄷鄹鄺鄻鄾鄿酃酅酆酇酈酊酋酎酏酐酣酔酕醄酖酗酞酡酢酤酩酴酹酺醁醅醆醊醍醐醑醓醖醝醞醡醤醨醪醭醯醰醱醲醴醵醸醹醼醽醾釂釃釅釆釈鱸鎦閶釓釔釕鈀釙鼢鼴釤釧釪釬釭釱釷釸釹鈁鈃鈄鈆鈇鈈鈊鈌鈐鈑鈒鈤鈥鈧鈬鈮鈰鈳鐺鈸鈹鈽鈿鉄鉆鉈鉋鉌鉍鉏鉑鉕鉚鉢鉥鉦鉨鉬鉭鉱鉲鉶鉸鉺鉼鉿銍銎銑銕鏤銚銛銠銣銤銥銦銧銩銪銫銭銰銲銶銻銼銾鋂鋃鋆鋈鋊鋌鋍鋏鋐鋑鋕鋘鋙鋝鋟鋦鋨鋩鋭鋮鋯鋰鋱鋳鋹鋺鋻鏰鐱錀錁錆錇錈錍錏錒錔錙錚錛錞錟錡錤錩錬録錸錼鍀鍆鍇鍉鍍鍏鍐鍘鍚鍛鍠鍤鍥鍩鍫鍭鍱鍴鍶鍹鍺鍼鍾鎄鎇鎉鎋鎌鎍鎏鎒鎓鎗鎘鎚鎞鎡鎤鎩鎪鎭鎯鎰鎳鎴鎵鎸鎹鎿鏇鏊鏌鏐鏑鏖鏗鏘鏚鏜鏝鏞鏠鏦鏨鏷鏸鏹鏻鏽鏾鐃鐄鐇鐏鐒鐓鐔鐗馗鐙鐝鐠鐡鐦鐨鐩鐫鐬鐱鐳鐶鐻鐽鐿鑀鑅鑌鑐鑕鑚鑛鑢鑤鑥鑪鑭鑯鑱鑴鑵鑷钁钃镻閆閈閌閎閒閔閗閟閡関閤閤閧閬閲閹閺閻閼閽閿闇闉闋闐闑闒闓闘闚闞闟闠闤闥阞阢阤阨阬阯阹阼阽陁陑陔陛陜陡陥陬騭陴険陼陾隂隃隈隒隗隞隠隣隤隩隮隰顴隳隷隹雂雈雉雊雎雑雒雗雘雚雝雟雩雰雱驛霂霅霈霊霑霒霓霙霝霢霣霤霨霩霪霫霮靁靆靉靑靚靣靦靪靮靰靳靷靸靺靼靿鞀鞃鞄鞌鞗鞙鞚鞝鞞鞡鞣鞨鞫鞬鞮鞶鞹鞾韃韅韉馱韍韎韔韖韘韝韞韡韣韭韮韱韹韺頀颳頄頇頊頍頎頏頒頖頞頠頫頬顱頯頲頴頼顇顋顑顒顓顔顕顚顜顢顣顬顳颭颮颱颶颸颺颻颽颾颿飀飂飈飌飜飡飣飤飥飩飫飮飱飶餀餂餄餎餇餈餑餔餕餖餗餚餛餜餟餠餤餧餩餪餫餬餮餱餲餳餺餻餼餽餿饁饅饇饉饊饍饎饐饘饟饢馘馥馝馡馣騮騾馵馹駃駄駅駆駉駋駑駓駔駗駘駙駜駡駢駪駬駰駴駸駹駽駾騂騄騅騆騉騋騍騏驎騑騒験騕騖騠騢騣騤騧驤騵騶騸騺驀驂驃驄驆驈驊驌驍驎驏驒驔驖驙驦驩驫骺鯁骫骭骯骱骴骶骷髏骾髁髂髄髆髈髐髑髕髖髙髝髞髟髡髣髧髪髫髭髯髲髳髹髺髽髾鬁鬃鬅鬈鬋鬎鬏鬐鬑鬒鬖鬗鬘鬙鬠鬣鬪鬫鬬鬮鬯鬰鬲鬵鬷魆魈魊魋魍魎魑魖鰾魛魟魣魦魨魬魴魵魸鮀鮁鮆鮌鮎鮑鮒鮓鮚鮞鮟鱇鮠鮦鮨鮪鮭鮶鮸鮿鯀鯄鯆鯇鯈鯔鯕鯖鯗鯙鯠鯤鯥鯫鯰鯷鯸鯿鰂鰆鶼鰉鰋鰐鰒鰕鰛鰜鰣鰤鰥鰦鰨鰩鰮鰳鰶鰷鱺鰼鰽鱀鱄鱅鱆鱈鱎鱐鱓鱔鱖鱘鱟鱠鱣鱨鱭鱮鱲鱵鱻鲅鳦鳧鳯鳲鳷鳻鴂鴃鴄鴆鴈鴎鴒鴔鴗鴛鴦鴝鵒鴟鴠鴢鴣鴥鴯鶓鴳鴴鴷鴽鵀鵁鵂鵓鵖鵙鵜鶘鵞鵟鵩鵪鵫鵵鵷鵻鵾鶂鶊鶏鶒鶖鶗鶡鶤鶦鶬鶱鶲鶵鶸鶹鶺鶿鷀鷁鷃鷄鷇鷈鷉鷊鷏鷓鷕鷖鷙鷞鷟鷥鷦鷯鷩鷫鷭鷳鷴鷽鷾鷿鸂鸇鸊鸏鸑鸒鸓鸕鸛鸜鸝鹸鹹鹺
麀麂麃麄麇麋麌麐麑麒麚麛麝麤麩麪麫麮麯麰麺麾黁黈黌黢黒黓黕黙黝黟黥黦黧黮黰黱黲黶黹黻黼黽黿鼂鼃鼅鼈鼉鼏鼐鼒鼕鼖鼙鼚鼛鼡鼩鼱鼪鼫鼯鼷鼽齁齆齇齈齉齌齎齏齔齕齗齙齚齜齞齟齬齠齢齣齧齩齮齯齰齱齵齾龎龑龒龔龖龘龝龡龢龤'
assert len(simplified_charcters) == len(traditional_characters)

s2t_dict = {}
t2s_dict = {}
for i, item in enumerate(simplified_charcters):
    s2t_dict[item] = traditional_characters[i]
    t2s_dict[traditional_characters[i]] = item


def tranditional_to_simplified(text: str) -> str:
    return "".join(
        [t2s_dict[item] if item in t2s_dict else item for item in text])


def simplified_to_traditional(text: str) -> str:
    return "".join(
        [s2t_dict[item] if item in s2t_dict else item for item in text])


if __name__ == "__main__":
    text = "一般是指存取一個應用程式啟動時始終顯示在網站或網頁瀏覽器中的一個或多個初始網頁等畫面存在的站點"
    print(text)
    text_simple = tranditional_to_simplified(text)
    print(text_simple)
    text_traditional = simplified_to_traditional(text_simple)
    print(text_traditional)
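Because the two tables are aligned character-by-character, `s2t_dict` necessarily keeps only one traditional variant per simplified character, which is exactly the caveat in the module docstring. A small illustration of what that means for round-tripping (assuming the module is importable as `char_convert` and that these common characters are covered by the tables above):

    from char_convert import simplified_to_traditional, tranditional_to_simplified

    trad = "頭髮發白"  # 髮 (hair) and 發 (emit) both simplify to 发
    simp = tranditional_to_simplified(trad)   # expected: "头发发白"
    back = simplified_to_traditional(simp)    # both 发 come back as one and the same variant
    print(simp, back, back == trad)           # the round trip is not guaranteed to be lossless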
spaces/An-619/FastSAM/utils/__init__.py
DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_inpaint.py
DELETED
@@ -1,1138 +0,0 @@
# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/

import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    PIL_INTERPOLATION,
    is_accelerate_available,
    is_accelerate_version,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import numpy as np
        >>> import torch
        >>> from PIL import Image
        >>> from stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

        >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
        >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
        >>> from diffusers.utils import load_image

        >>> def ade_palette():
                return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
                        [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
                        [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
                        [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
                        [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
                        [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
                        [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
                        [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
                        [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
                        [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
                        [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
                        [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
                        [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
                        [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
                        [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
                        [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
                        [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
                        [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
                        [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
                        [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
                        [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
                        [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
                        [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
                        [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
                        [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
                        [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
                        [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
                        [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
                        [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
                        [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
                        [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
                        [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
                        [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
                        [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
                        [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
                        [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
                        [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
                        [102, 255, 0], [92, 0, 255]]

        >>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
        >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")

        >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)

        >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
                "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
            )

        >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        >>> pipe.enable_xformers_memory_efficient_attention()
        >>> pipe.enable_model_cpu_offload()

        >>> def image_to_seg(image):
                pixel_values = image_processor(image, return_tensors="pt").pixel_values
                with torch.no_grad():
                    outputs = image_segmentor(pixel_values)
                seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
                color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)  # height, width, 3
                palette = np.array(ade_palette())
                for label, color in enumerate(palette):
                    color_seg[seg == label, :] = color
                color_seg = color_seg.astype(np.uint8)
                seg_image = Image.fromarray(color_seg)
                return seg_image

        >>> image = load_image(
                "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
            )

        >>> mask_image = load_image(
                "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
            )

        >>> controlnet_conditioning_image = image_to_seg(image)

        >>> image = pipe(
                "Face of a yellow cat, high resolution, sitting on a park bench",
                image,
                mask_image,
                controlnet_conditioning_image,
                num_inference_steps=20,
            ).images[0]

        >>> image.save("out.png")
        ```
"""

def prepare_image(image):
    if isinstance(image, torch.Tensor):
        # Batch single image
        if image.ndim == 3:
            image = image.unsqueeze(0)

        image = image.to(dtype=torch.float32)
    else:
        # preprocess image
        if isinstance(image, (PIL.Image.Image, np.ndarray)):
            image = [image]

        if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
            image = [np.array(i.convert("RGB"))[None, :] for i in image]
            image = np.concatenate(image, axis=0)
        elif isinstance(image, list) and isinstance(image[0], np.ndarray):
            image = np.concatenate([i[None, :] for i in image], axis=0)

        image = image.transpose(0, 3, 1, 2)
        image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0

    return image


def prepare_mask_image(mask_image):
    if isinstance(mask_image, torch.Tensor):
        if mask_image.ndim == 2:
            # Batch and add channel dim for single mask
            mask_image = mask_image.unsqueeze(0).unsqueeze(0)
        elif mask_image.ndim == 3 and mask_image.shape[0] == 1:
            # Single mask, the 0'th dimension is considered to be
            # the existing batch size of 1
            mask_image = mask_image.unsqueeze(0)
        elif mask_image.ndim == 3 and mask_image.shape[0] != 1:
            # Batch of mask, the 0'th dimension is considered to be
            # the batching dimension
            mask_image = mask_image.unsqueeze(1)

        # Binarize mask
        mask_image[mask_image < 0.5] = 0
        mask_image[mask_image >= 0.5] = 1
    else:
        # preprocess mask
        if isinstance(mask_image, (PIL.Image.Image, np.ndarray)):
            mask_image = [mask_image]

        if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image):
            mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0)
            mask_image = mask_image.astype(np.float32) / 255.0
        elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray):
            mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0)

        mask_image[mask_image < 0.5] = 0
        mask_image[mask_image >= 0.5] = 1
        mask_image = torch.from_numpy(mask_image)

    return mask_image


def prepare_controlnet_conditioning_image(
    controlnet_conditioning_image,
    width,
    height,
    batch_size,
    num_images_per_prompt,
    device,
    dtype,
    do_classifier_free_guidance,
):
    if not isinstance(controlnet_conditioning_image, torch.Tensor):
        if isinstance(controlnet_conditioning_image, PIL.Image.Image):
            controlnet_conditioning_image = [controlnet_conditioning_image]

        if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
            controlnet_conditioning_image = [
                np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
                for i in controlnet_conditioning_image
            ]
            controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
            controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
            controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
            controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
        elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
            controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)

    image_batch_size = controlnet_conditioning_image.shape[0]

    if image_batch_size == 1:
        repeat_by = batch_size
    else:
        # image batch size is the same as prompt batch size
        repeat_by = num_images_per_prompt

    controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)

    controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)

    if do_classifier_free_guidance:
        controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)

    return controlnet_conditioning_image
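As a quick sanity check on the three helpers above, they normalize quite different input types into the tensor layouts the pipeline expects. A minimal sketch of that behaviour, using the helpers defined just above (the 512x512 size, CPU device, and single-image batch are arbitrary assumptions for illustration):

    import numpy as np
    import PIL.Image
    import torch

    rgb = PIL.Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))   # dummy RGB image
    mask = PIL.Image.fromarray(np.zeros((512, 512), dtype=np.uint8))     # dummy single-channel mask

    image = prepare_image(rgb)             # float32, NCHW, values scaled to [-1, 1]
    mask_image = prepare_mask_image(mask)  # float32, N1HW, binarized to {0, 1}
    cond = prepare_controlnet_conditioning_image(
        rgb, width=512, height=512, batch_size=1, num_images_per_prompt=1,
        device=torch.device("cpu"), dtype=torch.float32,
        do_classifier_free_guidance=False,
    )                                      # float32, NCHW, values in [0, 1]
    print(image.shape, mask_image.shape, cond.shape)  # (1, 3, 512, 512), (1, 1, 512, 512), (1, 3, 512, 512)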
class StableDiffusionControlNetInpaintPipeline(DiffusionPipeline):
    """
    Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
    """

    _optional_components = ["safety_checker", "feature_extractor"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
        scheduler: KarrasDiffusionSchedulers,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        if isinstance(controlnet, (list, tuple)):
            controlnet = MultiControlNetModel(controlnet)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding.

        When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
        steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
        text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to
        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
        Note that offloading happens on a submodule basis. Memory savings are higher than with
        `enable_model_cpu_offload`, but performance is lower.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
            cpu_offload(cpu_offloaded_model, device)

        if self.safety_checker is not None:
            cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            # the safety checker can offload the vae again
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # the control net hook has to be manually offloaded as it alternates with the unet
        cpu_offload_with_hook(self.controlnet, device)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
                Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
        """
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
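When classifier-free guidance is enabled, `_encode_prompt` returns the negative and positive embeddings stacked along the batch dimension (negative first, per the `torch.cat` above). The denoising loop of this pipeline is not part of the excerpt shown here, so the following is only the generic diffusers-style pattern for consuming that stacked tensor, with `unet`, `latents`, `t` and `guidance_scale` standing in for the usual loop variables:

    # latents: (N, C, H, W); prompt_embeds: (2N, L, D) as returned by _encode_prompt
    latent_model_input = torch.cat([latents] * 2)
    noise_pred = unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)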
    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):
        image_is_pil = isinstance(image, PIL.Image.Image)
        image_is_tensor = isinstance(image, torch.Tensor)
        image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
        image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)

        if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
            raise TypeError(
                "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
            )

        if image_is_pil:
            image_batch_size = 1
        elif image_is_tensor:
            image_batch_size = image.shape[0]
        elif image_is_pil_list:
            image_batch_size = len(image)
        elif image_is_tensor_list:
            image_batch_size = len(image)
        else:
            raise ValueError("controlnet condition image is not valid")

        if prompt is not None and isinstance(prompt, str):
            prompt_batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            prompt_batch_size = len(prompt)
        elif prompt_embeds is not None:
            prompt_batch_size = prompt_embeds.shape[0]
        else:
            raise ValueError("prompt or prompt_embeds are not valid")

        if image_batch_size != 1 and image_batch_size != prompt_batch_size:
            raise ValueError(
                f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
            )
    def check_inputs(
        self,
        prompt,
        image,
        mask_image,
        controlnet_conditioning_image,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        controlnet_conditioning_scale=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        # check controlnet condition image
        if isinstance(self.controlnet, ControlNetModel):
            self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)
        elif isinstance(self.controlnet, MultiControlNetModel):
            if not isinstance(controlnet_conditioning_image, list):
                raise TypeError("For multiple controlnets: `image` must be type `list`")
            if len(controlnet_conditioning_image) != len(self.controlnet.nets):
                raise ValueError(
                    "For multiple controlnets: `image` must have the same length as the number of controlnets."
                )
            for image_ in controlnet_conditioning_image:
                self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)
        else:
            assert False

        # Check `controlnet_conditioning_scale`
        if isinstance(self.controlnet, ControlNetModel):
            if not isinstance(controlnet_conditioning_scale, float):
                raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
        elif isinstance(self.controlnet, MultiControlNetModel):
            if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
                self.controlnet.nets
            ):
                raise ValueError(
                    "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
                    " the same length as the number of controlnets"
                )
        else:
            assert False

        if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor):
            raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor")

        if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image):
            raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image")

        if isinstance(image, torch.Tensor):
            if image.ndim != 3 and image.ndim != 4:
                raise ValueError("`image` must have 3 or 4 dimensions")

            if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4:
                raise ValueError("`mask_image` must have 2, 3, or 4 dimensions")
-
if image.ndim == 3:
|
671 |
-
image_batch_size = 1
|
672 |
-
image_channels, image_height, image_width = image.shape
|
673 |
-
elif image.ndim == 4:
|
674 |
-
image_batch_size, image_channels, image_height, image_width = image.shape
|
675 |
-
else:
|
676 |
-
assert False
|
677 |
-
|
678 |
-
if mask_image.ndim == 2:
|
679 |
-
mask_image_batch_size = 1
|
680 |
-
mask_image_channels = 1
|
681 |
-
mask_image_height, mask_image_width = mask_image.shape
|
682 |
-
elif mask_image.ndim == 3:
|
683 |
-
mask_image_channels = 1
|
684 |
-
mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape
|
685 |
-
elif mask_image.ndim == 4:
|
686 |
-
mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape
|
687 |
-
|
688 |
-
if image_channels != 3:
|
689 |
-
raise ValueError("`image` must have 3 channels")
|
690 |
-
|
691 |
-
if mask_image_channels != 1:
|
692 |
-
raise ValueError("`mask_image` must have 1 channel")
|
693 |
-
|
694 |
-
if image_batch_size != mask_image_batch_size:
|
695 |
-
raise ValueError("`image` and `mask_image` mush have the same batch sizes")
|
696 |
-
|
697 |
-
if image_height != mask_image_height or image_width != mask_image_width:
|
698 |
-
raise ValueError("`image` and `mask_image` must have the same height and width dimensions")
|
699 |
-
|
700 |
-
if image.min() < -1 or image.max() > 1:
|
701 |
-
raise ValueError("`image` should be in range [-1, 1]")
|
702 |
-
|
703 |
-
if mask_image.min() < 0 or mask_image.max() > 1:
|
704 |
-
raise ValueError("`mask_image` should be in range [0, 1]")
|
705 |
-
else:
|
706 |
-
mask_image_channels = 1
|
707 |
-
image_channels = 3
|
708 |
-
|
709 |
-
single_image_latent_channels = self.vae.config.latent_channels
|
710 |
-
|
711 |
-
total_latent_channels = single_image_latent_channels * 2 + mask_image_channels
|
712 |
-
|
713 |
-
if total_latent_channels != self.unet.config.in_channels:
|
714 |
-
raise ValueError(
|
715 |
-
f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received"
|
716 |
-
f" non inpainting latent channels: {single_image_latent_channels},"
|
717 |
-
f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}."
|
718 |
-
f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs."
|
719 |
-
)
|
720 |
-
|
721 |
-
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
|
722 |
-
shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
|
723 |
-
if isinstance(generator, list) and len(generator) != batch_size:
|
724 |
-
raise ValueError(
|
725 |
-
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
|
726 |
-
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
|
727 |
-
)
|
728 |
-
|
729 |
-
if latents is None:
|
730 |
-
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
|
731 |
-
else:
|
732 |
-
latents = latents.to(device)
|
733 |
-
|
734 |
-
# scale the initial noise by the standard deviation required by the scheduler
|
735 |
-
latents = latents * self.scheduler.init_noise_sigma
|
736 |
-
|
737 |
-
return latents
|
738 |
-
|
739 |
-
def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance):
|
740 |
-
# resize the mask to latents shape as we concatenate the mask to the latents
|
741 |
-
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
|
742 |
-
# and half precision
|
743 |
-
mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor))
|
744 |
-
mask_image = mask_image.to(device=device, dtype=dtype)
|
745 |
-
|
746 |
-
# duplicate mask for each generation per prompt, using mps friendly method
|
747 |
-
if mask_image.shape[0] < batch_size:
|
748 |
-
if not batch_size % mask_image.shape[0] == 0:
|
749 |
-
raise ValueError(
|
750 |
-
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
|
751 |
-
f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number"
|
752 |
-
" of masks that you pass is divisible by the total requested batch size."
|
753 |
-
)
|
754 |
-
mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1)
|
755 |
-
|
756 |
-
mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image
|
757 |
-
|
758 |
-
mask_image_latents = mask_image
|
759 |
-
|
760 |
-
return mask_image_latents
|
761 |
-
|
762 |
-
def prepare_masked_image_latents(
|
763 |
-
self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
|
764 |
-
):
|
765 |
-
masked_image = masked_image.to(device=device, dtype=dtype)
|
766 |
-
|
767 |
-
# encode the mask image into latents space so we can concatenate it to the latents
|
768 |
-
if isinstance(generator, list):
|
769 |
-
masked_image_latents = [
|
770 |
-
self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
|
771 |
-
for i in range(batch_size)
|
772 |
-
]
|
773 |
-
masked_image_latents = torch.cat(masked_image_latents, dim=0)
|
774 |
-
else:
|
775 |
-
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
|
776 |
-
masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
|
777 |
-
|
778 |
-
# duplicate masked_image_latents for each generation per prompt, using mps friendly method
|
779 |
-
if masked_image_latents.shape[0] < batch_size:
|
780 |
-
if not batch_size % masked_image_latents.shape[0] == 0:
|
781 |
-
raise ValueError(
|
782 |
-
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
|
783 |
-
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
|
784 |
-
" Make sure the number of images that you pass is divisible by the total requested batch size."
|
785 |
-
)
|
786 |
-
masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
|
787 |
-
|
788 |
-
masked_image_latents = (
|
789 |
-
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
|
790 |
-
)
|
791 |
-
|
792 |
-
# aligning device to prevent device errors when concating it with the latent model input
|
793 |
-
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
|
794 |
-
return masked_image_latents
|
795 |
-
|
796 |
-
def _default_height_width(self, height, width, image):
|
797 |
-
if isinstance(image, list):
|
798 |
-
image = image[0]
|
799 |
-
|
800 |
-
if height is None:
|
801 |
-
if isinstance(image, PIL.Image.Image):
|
802 |
-
height = image.height
|
803 |
-
elif isinstance(image, torch.Tensor):
|
804 |
-
height = image.shape[3]
|
805 |
-
|
806 |
-
height = (height // 8) * 8 # round down to nearest multiple of 8
|
807 |
-
|
808 |
-
if width is None:
|
809 |
-
if isinstance(image, PIL.Image.Image):
|
810 |
-
width = image.width
|
811 |
-
elif isinstance(image, torch.Tensor):
|
812 |
-
width = image.shape[2]
|
813 |
-
|
814 |
-
width = (width // 8) * 8 # round down to nearest multiple of 8
|
815 |
-
|
816 |
-
return height, width
|
817 |
-
|
818 |
-
@torch.no_grad()
|
819 |
-
@replace_example_docstring(EXAMPLE_DOC_STRING)
|
820 |
-
def __call__(
|
821 |
-
self,
|
822 |
-
prompt: Union[str, List[str]] = None,
|
823 |
-
image: Union[torch.Tensor, PIL.Image.Image] = None,
|
824 |
-
mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
|
825 |
-
controlnet_conditioning_image: Union[
|
826 |
-
torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
|
827 |
-
] = None,
|
828 |
-
height: Optional[int] = None,
|
829 |
-
width: Optional[int] = None,
|
830 |
-
num_inference_steps: int = 50,
|
831 |
-
guidance_scale: float = 7.5,
|
832 |
-
negative_prompt: Optional[Union[str, List[str]]] = None,
|
833 |
-
num_images_per_prompt: Optional[int] = 1,
|
834 |
-
eta: float = 0.0,
|
835 |
-
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
|
836 |
-
latents: Optional[torch.FloatTensor] = None,
|
837 |
-
prompt_embeds: Optional[torch.FloatTensor] = None,
|
838 |
-
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
|
839 |
-
output_type: Optional[str] = "pil",
|
840 |
-
return_dict: bool = True,
|
841 |
-
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
|
842 |
-
callback_steps: int = 1,
|
843 |
-
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
|
844 |
-
controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
|
845 |
-
):
|
846 |
-
r"""
|
847 |
-
Function invoked when calling the pipeline for generation.
|
848 |
-
|
849 |
-
Args:
|
850 |
-
prompt (`str` or `List[str]`, *optional*):
|
851 |
-
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
|
852 |
-
instead.
|
853 |
-
image (`torch.Tensor` or `PIL.Image.Image`):
|
854 |
-
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
|
855 |
-
be masked out with `mask_image` and repainted according to `prompt`.
|
856 |
-
mask_image (`torch.Tensor` or `PIL.Image.Image`):
|
857 |
-
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
|
858 |
-
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
|
859 |
-
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
|
860 |
-
instead of 3, so the expected shape would be `(B, H, W, 1)`.
|
861 |
-
controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
|
862 |
-
The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
|
863 |
-
the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can
|
864 |
-
also be accepted as an image. The control image is automatically resized to fit the output image.
|
865 |
-
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
866 |
-
The height in pixels of the generated image.
|
867 |
-
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
|
868 |
-
The width in pixels of the generated image.
|
869 |
-
num_inference_steps (`int`, *optional*, defaults to 50):
|
870 |
-
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
|
871 |
-
expense of slower inference.
|
872 |
-
guidance_scale (`float`, *optional*, defaults to 7.5):
|
873 |
-
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
|
874 |
-
`guidance_scale` is defined as `w` of equation 2. of [Imagen
|
875 |
-
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
|
876 |
-
1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
|
877 |
-
usually at the expense of lower image quality.
|
878 |
-
negative_prompt (`str` or `List[str]`, *optional*):
|
879 |
-
The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead.
|
880 |
-
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
|
881 |
-
num_images_per_prompt (`int`, *optional*, defaults to 1):
|
882 |
-
The number of images to generate per prompt.
|
883 |
-
eta (`float`, *optional*, defaults to 0.0):
|
884 |
-
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
|
885 |
-
[`schedulers.DDIMScheduler`], will be ignored for others.
|
886 |
-
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
|
887 |
-
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
|
888 |
-
to make generation deterministic.
|
889 |
-
latents (`torch.FloatTensor`, *optional*):
|
890 |
-
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
|
891 |
-
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
|
892 |
-
tensor will ge generated by sampling using the supplied random `generator`.
|
893 |
-
prompt_embeds (`torch.FloatTensor`, *optional*):
|
894 |
-
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
|
895 |
-
provided, text embeddings will be generated from `prompt` input argument.
|
896 |
-
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
|
897 |
-
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
|
898 |
-
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
|
899 |
-
argument.
|
900 |
-
output_type (`str`, *optional*, defaults to `"pil"`):
|
901 |
-
The output format of the generate image. Choose between
|
902 |
-
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
|
903 |
-
return_dict (`bool`, *optional*, defaults to `True`):
|
904 |
-
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
|
905 |
-
plain tuple.
|
906 |
-
callback (`Callable`, *optional*):
|
907 |
-
A function that will be called every `callback_steps` steps during inference. The function will be
|
908 |
-
called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
|
909 |
-
callback_steps (`int`, *optional*, defaults to 1):
|
910 |
-
The frequency at which the `callback` function will be called. If not specified, the callback will be
|
911 |
-
called at every step.
|
912 |
-
cross_attention_kwargs (`dict`, *optional*):
|
913 |
-
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
|
914 |
-
`self.processor` in
|
915 |
-
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
|
916 |
-
controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
|
917 |
-
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
|
918 |
-
to the residual in the original unet.
|
919 |
-
|
920 |
-
Examples:
|
921 |
-
|
922 |
-
Returns:
|
923 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
|
924 |
-
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
|
925 |
-
When returning a tuple, the first element is a list with the generated images, and the second element is a
|
926 |
-
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
|
927 |
-
(nsfw) content, according to the `safety_checker`.
|
928 |
-
"""
|
929 |
-
# 0. Default height and width to unet
|
930 |
-
height, width = self._default_height_width(height, width, controlnet_conditioning_image)
|
931 |
-
|
932 |
-
# 1. Check inputs. Raise error if not correct
|
933 |
-
self.check_inputs(
|
934 |
-
prompt,
|
935 |
-
image,
|
936 |
-
mask_image,
|
937 |
-
controlnet_conditioning_image,
|
938 |
-
height,
|
939 |
-
width,
|
940 |
-
callback_steps,
|
941 |
-
negative_prompt,
|
942 |
-
prompt_embeds,
|
943 |
-
negative_prompt_embeds,
|
944 |
-
controlnet_conditioning_scale,
|
945 |
-
)
|
946 |
-
|
947 |
-
# 2. Define call parameters
|
948 |
-
if prompt is not None and isinstance(prompt, str):
|
949 |
-
batch_size = 1
|
950 |
-
elif prompt is not None and isinstance(prompt, list):
|
951 |
-
batch_size = len(prompt)
|
952 |
-
else:
|
953 |
-
batch_size = prompt_embeds.shape[0]
|
954 |
-
|
955 |
-
device = self._execution_device
|
956 |
-
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
|
957 |
-
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
|
958 |
-
# corresponds to doing no classifier free guidance.
|
959 |
-
do_classifier_free_guidance = guidance_scale > 1.0
|
960 |
-
|
961 |
-
if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
|
962 |
-
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
|
963 |
-
|
964 |
-
# 3. Encode input prompt
|
965 |
-
prompt_embeds = self._encode_prompt(
|
966 |
-
prompt,
|
967 |
-
device,
|
968 |
-
num_images_per_prompt,
|
969 |
-
do_classifier_free_guidance,
|
970 |
-
negative_prompt,
|
971 |
-
prompt_embeds=prompt_embeds,
|
972 |
-
negative_prompt_embeds=negative_prompt_embeds,
|
973 |
-
)
|
974 |
-
|
975 |
-
# 4. Prepare mask, image, and controlnet_conditioning_image
|
976 |
-
image = prepare_image(image)
|
977 |
-
|
978 |
-
mask_image = prepare_mask_image(mask_image)
|
979 |
-
|
980 |
-
# condition image(s)
|
981 |
-
if isinstance(self.controlnet, ControlNetModel):
|
982 |
-
controlnet_conditioning_image = prepare_controlnet_conditioning_image(
|
983 |
-
controlnet_conditioning_image=controlnet_conditioning_image,
|
984 |
-
width=width,
|
985 |
-
height=height,
|
986 |
-
batch_size=batch_size * num_images_per_prompt,
|
987 |
-
num_images_per_prompt=num_images_per_prompt,
|
988 |
-
device=device,
|
989 |
-
dtype=self.controlnet.dtype,
|
990 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
991 |
-
)
|
992 |
-
elif isinstance(self.controlnet, MultiControlNetModel):
|
993 |
-
controlnet_conditioning_images = []
|
994 |
-
|
995 |
-
for image_ in controlnet_conditioning_image:
|
996 |
-
image_ = prepare_controlnet_conditioning_image(
|
997 |
-
controlnet_conditioning_image=image_,
|
998 |
-
width=width,
|
999 |
-
height=height,
|
1000 |
-
batch_size=batch_size * num_images_per_prompt,
|
1001 |
-
num_images_per_prompt=num_images_per_prompt,
|
1002 |
-
device=device,
|
1003 |
-
dtype=self.controlnet.dtype,
|
1004 |
-
do_classifier_free_guidance=do_classifier_free_guidance,
|
1005 |
-
)
|
1006 |
-
controlnet_conditioning_images.append(image_)
|
1007 |
-
|
1008 |
-
controlnet_conditioning_image = controlnet_conditioning_images
|
1009 |
-
else:
|
1010 |
-
assert False
|
1011 |
-
|
1012 |
-
masked_image = image * (mask_image < 0.5)
|
1013 |
-
|
1014 |
-
# 5. Prepare timesteps
|
1015 |
-
self.scheduler.set_timesteps(num_inference_steps, device=device)
|
1016 |
-
timesteps = self.scheduler.timesteps
|
1017 |
-
|
1018 |
-
# 6. Prepare latent variables
|
1019 |
-
num_channels_latents = self.vae.config.latent_channels
|
1020 |
-
latents = self.prepare_latents(
|
1021 |
-
batch_size * num_images_per_prompt,
|
1022 |
-
num_channels_latents,
|
1023 |
-
height,
|
1024 |
-
width,
|
1025 |
-
prompt_embeds.dtype,
|
1026 |
-
device,
|
1027 |
-
generator,
|
1028 |
-
latents,
|
1029 |
-
)
|
1030 |
-
|
1031 |
-
mask_image_latents = self.prepare_mask_latents(
|
1032 |
-
mask_image,
|
1033 |
-
batch_size * num_images_per_prompt,
|
1034 |
-
height,
|
1035 |
-
width,
|
1036 |
-
prompt_embeds.dtype,
|
1037 |
-
device,
|
1038 |
-
do_classifier_free_guidance,
|
1039 |
-
)
|
1040 |
-
|
1041 |
-
masked_image_latents = self.prepare_masked_image_latents(
|
1042 |
-
masked_image,
|
1043 |
-
batch_size * num_images_per_prompt,
|
1044 |
-
height,
|
1045 |
-
width,
|
1046 |
-
prompt_embeds.dtype,
|
1047 |
-
device,
|
1048 |
-
generator,
|
1049 |
-
do_classifier_free_guidance,
|
1050 |
-
)
|
1051 |
-
|
1052 |
-
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
|
1053 |
-
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
|
1054 |
-
|
1055 |
-
# 8. Denoising loop
|
1056 |
-
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
|
1057 |
-
with self.progress_bar(total=num_inference_steps) as progress_bar:
|
1058 |
-
for i, t in enumerate(timesteps):
|
1059 |
-
# expand the latents if we are doing classifier free guidance
|
1060 |
-
non_inpainting_latent_model_input = (
|
1061 |
-
torch.cat([latents] * 2) if do_classifier_free_guidance else latents
|
1062 |
-
)
|
1063 |
-
|
1064 |
-
non_inpainting_latent_model_input = self.scheduler.scale_model_input(
|
1065 |
-
non_inpainting_latent_model_input, t
|
1066 |
-
)
|
1067 |
-
|
1068 |
-
inpainting_latent_model_input = torch.cat(
|
1069 |
-
[non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1
|
1070 |
-
)
|
1071 |
-
|
1072 |
-
down_block_res_samples, mid_block_res_sample = self.controlnet(
|
1073 |
-
non_inpainting_latent_model_input,
|
1074 |
-
t,
|
1075 |
-
encoder_hidden_states=prompt_embeds,
|
1076 |
-
controlnet_cond=controlnet_conditioning_image,
|
1077 |
-
conditioning_scale=controlnet_conditioning_scale,
|
1078 |
-
return_dict=False,
|
1079 |
-
)
|
1080 |
-
|
1081 |
-
# predict the noise residual
|
1082 |
-
noise_pred = self.unet(
|
1083 |
-
inpainting_latent_model_input,
|
1084 |
-
t,
|
1085 |
-
encoder_hidden_states=prompt_embeds,
|
1086 |
-
cross_attention_kwargs=cross_attention_kwargs,
|
1087 |
-
down_block_additional_residuals=down_block_res_samples,
|
1088 |
-
mid_block_additional_residual=mid_block_res_sample,
|
1089 |
-
).sample
|
1090 |
-
|
1091 |
-
# perform guidance
|
1092 |
-
if do_classifier_free_guidance:
|
1093 |
-
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
|
1094 |
-
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
|
1095 |
-
|
1096 |
-
# compute the previous noisy sample x_t -> x_t-1
|
1097 |
-
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
|
1098 |
-
|
1099 |
-
# call the callback, if provided
|
1100 |
-
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
|
1101 |
-
progress_bar.update()
|
1102 |
-
if callback is not None and i % callback_steps == 0:
|
1103 |
-
callback(i, t, latents)
|
1104 |
-
|
1105 |
-
# If we do sequential model offloading, let's offload unet and controlnet
|
1106 |
-
# manually for max memory savings
|
1107 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
1108 |
-
self.unet.to("cpu")
|
1109 |
-
self.controlnet.to("cpu")
|
1110 |
-
torch.cuda.empty_cache()
|
1111 |
-
|
1112 |
-
if output_type == "latent":
|
1113 |
-
image = latents
|
1114 |
-
has_nsfw_concept = None
|
1115 |
-
elif output_type == "pil":
|
1116 |
-
# 8. Post-processing
|
1117 |
-
image = self.decode_latents(latents)
|
1118 |
-
|
1119 |
-
# 9. Run safety checker
|
1120 |
-
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
1121 |
-
|
1122 |
-
# 10. Convert to PIL
|
1123 |
-
image = self.numpy_to_pil(image)
|
1124 |
-
else:
|
1125 |
-
# 8. Post-processing
|
1126 |
-
image = self.decode_latents(latents)
|
1127 |
-
|
1128 |
-
# 9. Run safety checker
|
1129 |
-
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
|
1130 |
-
|
1131 |
-
# Offload last model to CPU
|
1132 |
-
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
|
1133 |
-
self.final_offload_hook.offload()
|
1134 |
-
|
1135 |
-
if not return_dict:
|
1136 |
-
return (image, has_nsfw_concept)
|
1137 |
-
|
1138 |
-
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/versatile_diffusion/__init__.py
DELETED
File without changes
|
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/cascade_rcnn_r50_fpn.py',
|
3 |
-
'../_base_/datasets/coco_detection.py',
|
4 |
-
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
|
5 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/optimization/constants.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
ASSETS_DIR_NAME = "assets"
|
2 |
-
RANKED_RESULTS_DIR = "ranked"
|
|
|
|
|
|
spaces/Arnx/MusicGenXvAKN/audiocraft/models/builders.py
DELETED
@@ -1,218 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
All the functions to build the relevant models and modules
|
9 |
-
from the Hydra config.
|
10 |
-
"""
|
11 |
-
|
12 |
-
import typing as tp
|
13 |
-
import warnings
|
14 |
-
|
15 |
-
import audiocraft
|
16 |
-
import omegaconf
|
17 |
-
import torch
|
18 |
-
|
19 |
-
from .encodec import CompressionModel, EncodecModel, FlattenedCompressionModel # noqa
|
20 |
-
from .lm import LMModel
|
21 |
-
from ..modules.codebooks_patterns import (
|
22 |
-
CodebooksPatternProvider,
|
23 |
-
DelayedPatternProvider,
|
24 |
-
ParallelPatternProvider,
|
25 |
-
UnrolledPatternProvider,
|
26 |
-
VALLEPattern,
|
27 |
-
MusicLMPattern,
|
28 |
-
)
|
29 |
-
from ..modules.conditioners import (
|
30 |
-
BaseConditioner,
|
31 |
-
ConditioningProvider,
|
32 |
-
LUTConditioner,
|
33 |
-
T5Conditioner,
|
34 |
-
ConditionFuser,
|
35 |
-
ChromaStemConditioner,
|
36 |
-
)
|
37 |
-
from .. import quantization as qt
|
38 |
-
from ..utils.utils import dict_from_config
|
39 |
-
|
40 |
-
|
41 |
-
def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer:
|
42 |
-
klass = {
|
43 |
-
'no_quant': qt.DummyQuantizer,
|
44 |
-
'rvq': qt.ResidualVectorQuantizer
|
45 |
-
}[quantizer]
|
46 |
-
kwargs = dict_from_config(getattr(cfg, quantizer))
|
47 |
-
if quantizer != 'no_quant':
|
48 |
-
kwargs['dimension'] = dimension
|
49 |
-
return klass(**kwargs)
|
50 |
-
|
51 |
-
|
52 |
-
def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig):
|
53 |
-
if encoder_name == 'seanet':
|
54 |
-
kwargs = dict_from_config(getattr(cfg, 'seanet'))
|
55 |
-
encoder_override_kwargs = kwargs.pop('encoder')
|
56 |
-
decoder_override_kwargs = kwargs.pop('decoder')
|
57 |
-
encoder_kwargs = {**kwargs, **encoder_override_kwargs}
|
58 |
-
decoder_kwargs = {**kwargs, **decoder_override_kwargs}
|
59 |
-
encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs)
|
60 |
-
decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs)
|
61 |
-
return encoder, decoder
|
62 |
-
else:
|
63 |
-
raise KeyError(f'Unexpected compression model {cfg.compression_model}')
|
64 |
-
|
65 |
-
|
66 |
-
def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel:
|
67 |
-
"""Instantiate a compression model.
|
68 |
-
"""
|
69 |
-
if cfg.compression_model == 'encodec':
|
70 |
-
kwargs = dict_from_config(getattr(cfg, 'encodec'))
|
71 |
-
encoder_name = kwargs.pop('autoencoder')
|
72 |
-
quantizer_name = kwargs.pop('quantizer')
|
73 |
-
encoder, decoder = get_encodec_autoencoder(encoder_name, cfg)
|
74 |
-
quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension)
|
75 |
-
frame_rate = kwargs['sample_rate'] // encoder.hop_length
|
76 |
-
renormalize = kwargs.pop('renormalize', None)
|
77 |
-
renorm = kwargs.pop('renorm')
|
78 |
-
if renormalize is None:
|
79 |
-
renormalize = renorm is not None
|
80 |
-
warnings.warn("You are using a deprecated EnCodec model. Please migrate to new renormalization.")
|
81 |
-
return EncodecModel(encoder, decoder, quantizer,
|
82 |
-
frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device)
|
83 |
-
else:
|
84 |
-
raise KeyError(f'Unexpected compression model {cfg.compression_model}')
|
85 |
-
|
86 |
-
|
87 |
-
def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:
|
88 |
-
"""Instantiate a transformer LM.
|
89 |
-
"""
|
90 |
-
if cfg.lm_model == 'transformer_lm':
|
91 |
-
kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))
|
92 |
-
n_q = kwargs['n_q']
|
93 |
-
q_modeling = kwargs.pop('q_modeling', None)
|
94 |
-
codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')
|
95 |
-
attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))
|
96 |
-
cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))
|
97 |
-
cfg_prob, cfg_coef = cls_free_guidance["training_dropout"], cls_free_guidance["inference_coef"]
|
98 |
-
fuser = get_condition_fuser(cfg)
|
99 |
-
condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device)
|
100 |
-
if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programatically
|
101 |
-
kwargs['cross_attention'] = True
|
102 |
-
if codebooks_pattern_cfg.modeling is None:
|
103 |
-
assert q_modeling is not None, \
|
104 |
-
'LM model should either have a codebook pattern defined or transformer_lm.q_modeling'
|
105 |
-
codebooks_pattern_cfg = omegaconf.OmegaConf.create(
|
106 |
-
{'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}
|
107 |
-
)
|
108 |
-
pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)
|
109 |
-
return LMModel(
|
110 |
-
pattern_provider=pattern_provider,
|
111 |
-
condition_provider=condition_provider,
|
112 |
-
fuser=fuser,
|
113 |
-
cfg_dropout=cfg_prob,
|
114 |
-
cfg_coef=cfg_coef,
|
115 |
-
attribute_dropout=attribute_dropout,
|
116 |
-
dtype=getattr(torch, cfg.dtype),
|
117 |
-
device=cfg.device,
|
118 |
-
**kwargs
|
119 |
-
).to(cfg.device)
|
120 |
-
else:
|
121 |
-
raise KeyError(f'Unexpected LM model {cfg.lm_model}')
|
122 |
-
|
123 |
-
|
124 |
-
def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider:
|
125 |
-
"""Instantiate a conditioning model.
|
126 |
-
"""
|
127 |
-
device = cfg.device
|
128 |
-
duration = cfg.dataset.segment_duration
|
129 |
-
cfg = getattr(cfg, "conditioners")
|
130 |
-
cfg = omegaconf.OmegaConf.create({}) if cfg is None else cfg
|
131 |
-
conditioners: tp.Dict[str, BaseConditioner] = {}
|
132 |
-
with omegaconf.open_dict(cfg):
|
133 |
-
condition_provider_args = cfg.pop('args', {})
|
134 |
-
for cond, cond_cfg in cfg.items():
|
135 |
-
model_type = cond_cfg["model"]
|
136 |
-
model_args = cond_cfg[model_type]
|
137 |
-
if model_type == "t5":
|
138 |
-
conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args)
|
139 |
-
elif model_type == "lut":
|
140 |
-
conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args)
|
141 |
-
elif model_type == "chroma_stem":
|
142 |
-
model_args.pop('cache_path', None)
|
143 |
-
conditioners[str(cond)] = ChromaStemConditioner(
|
144 |
-
output_dim=output_dim,
|
145 |
-
duration=duration,
|
146 |
-
device=device,
|
147 |
-
**model_args
|
148 |
-
)
|
149 |
-
else:
|
150 |
-
raise ValueError(f"unrecognized conditioning model: {model_type}")
|
151 |
-
conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args)
|
152 |
-
return conditioner
|
153 |
-
|
154 |
-
|
155 |
-
def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
|
156 |
-
"""Instantiate a condition fuser object.
|
157 |
-
"""
|
158 |
-
fuser_cfg = getattr(cfg, "fuser")
|
159 |
-
fuser_methods = ["sum", "cross", "prepend", "input_interpolate"]
|
160 |
-
fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
|
161 |
-
kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
|
162 |
-
fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
|
163 |
-
return fuser
|
164 |
-
|
165 |
-
|
166 |
-
def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
|
167 |
-
"""Instantiate a codebooks pattern provider object.
|
168 |
-
"""
|
169 |
-
pattern_providers = {
|
170 |
-
'parallel': ParallelPatternProvider,
|
171 |
-
'delay': DelayedPatternProvider,
|
172 |
-
'unroll': UnrolledPatternProvider,
|
173 |
-
'valle': VALLEPattern,
|
174 |
-
'musiclm': MusicLMPattern,
|
175 |
-
}
|
176 |
-
name = cfg.modeling
|
177 |
-
kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
|
178 |
-
klass = pattern_providers[name]
|
179 |
-
return klass(n_q, **kwargs)
|
180 |
-
|
181 |
-
|
182 |
-
def get_debug_compression_model(device='cpu'):
|
183 |
-
"""Instantiate a debug compression model to be used for unit tests.
|
184 |
-
"""
|
185 |
-
seanet_kwargs = {
|
186 |
-
'n_filters': 4,
|
187 |
-
'n_residual_layers': 1,
|
188 |
-
'dimension': 32,
|
189 |
-
'ratios': [10, 8, 16] # 25 Hz at 32kHz
|
190 |
-
}
|
191 |
-
encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs)
|
192 |
-
decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs)
|
193 |
-
quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4)
|
194 |
-
init_x = torch.randn(8, 32, 128)
|
195 |
-
quantizer(init_x, 1) # initialize kmeans etc.
|
196 |
-
compression_model = EncodecModel(
|
197 |
-
encoder, decoder, quantizer,
|
198 |
-
frame_rate=25, sample_rate=32000, channels=1).to(device)
|
199 |
-
return compression_model.eval()
|
200 |
-
|
201 |
-
|
202 |
-
def get_debug_lm_model(device='cpu'):
|
203 |
-
"""Instantiate a debug LM to be used for unit tests.
|
204 |
-
"""
|
205 |
-
pattern = DelayedPatternProvider(n_q=4)
|
206 |
-
dim = 16
|
207 |
-
providers = {
|
208 |
-
'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"),
|
209 |
-
}
|
210 |
-
condition_provider = ConditioningProvider(providers)
|
211 |
-
fuser = ConditionFuser(
|
212 |
-
{'cross': ['description'], 'prepend': [],
|
213 |
-
'sum': [], 'input_interpolate': []})
|
214 |
-
lm = LMModel(
|
215 |
-
pattern, condition_provider, fuser,
|
216 |
-
n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2,
|
217 |
-
cross_attention=True, causal=True)
|
218 |
-
return lm.to(device).eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/list.py
DELETED
@@ -1,365 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import logging
|
3 |
-
from optparse import Values
|
4 |
-
from typing import TYPE_CHECKING, Generator, List, Optional, Sequence, Tuple, cast
|
5 |
-
|
6 |
-
from pip._vendor.packaging.utils import canonicalize_name
|
7 |
-
|
8 |
-
from pip._internal.cli import cmdoptions
|
9 |
-
from pip._internal.cli.req_command import IndexGroupCommand
|
10 |
-
from pip._internal.cli.status_codes import SUCCESS
|
11 |
-
from pip._internal.exceptions import CommandError
|
12 |
-
from pip._internal.index.collector import LinkCollector
|
13 |
-
from pip._internal.index.package_finder import PackageFinder
|
14 |
-
from pip._internal.metadata import BaseDistribution, get_environment
|
15 |
-
from pip._internal.models.selection_prefs import SelectionPreferences
|
16 |
-
from pip._internal.network.session import PipSession
|
17 |
-
from pip._internal.utils.compat import stdlib_pkgs
|
18 |
-
from pip._internal.utils.misc import tabulate, write_output
|
19 |
-
|
20 |
-
if TYPE_CHECKING:
|
21 |
-
from pip._internal.metadata.base import DistributionVersion
|
22 |
-
|
23 |
-
class _DistWithLatestInfo(BaseDistribution):
|
24 |
-
"""Give the distribution object a couple of extra fields.
|
25 |
-
|
26 |
-
These will be populated during ``get_outdated()``. This is dirty but
|
27 |
-
makes the rest of the code much cleaner.
|
28 |
-
"""
|
29 |
-
|
30 |
-
latest_version: DistributionVersion
|
31 |
-
latest_filetype: str
|
32 |
-
|
33 |
-
_ProcessedDists = Sequence[_DistWithLatestInfo]
|
34 |
-
|
35 |
-
|
36 |
-
logger = logging.getLogger(__name__)
|
37 |
-
|
38 |
-
|
39 |
-
class ListCommand(IndexGroupCommand):
|
40 |
-
"""
|
41 |
-
List installed packages, including editables.
|
42 |
-
|
43 |
-
Packages are listed in a case-insensitive sorted order.
|
44 |
-
"""
|
45 |
-
|
46 |
-
ignore_require_venv = True
|
47 |
-
usage = """
|
48 |
-
%prog [options]"""
|
49 |
-
|
50 |
-
def add_options(self) -> None:
|
51 |
-
self.cmd_opts.add_option(
|
52 |
-
"-o",
|
53 |
-
"--outdated",
|
54 |
-
action="store_true",
|
55 |
-
default=False,
|
56 |
-
help="List outdated packages",
|
57 |
-
)
|
58 |
-
self.cmd_opts.add_option(
|
59 |
-
"-u",
|
60 |
-
"--uptodate",
|
61 |
-
action="store_true",
|
62 |
-
default=False,
|
63 |
-
help="List uptodate packages",
|
64 |
-
)
|
65 |
-
self.cmd_opts.add_option(
|
66 |
-
"-e",
|
67 |
-
"--editable",
|
68 |
-
action="store_true",
|
69 |
-
default=False,
|
70 |
-
help="List editable projects.",
|
71 |
-
)
|
72 |
-
self.cmd_opts.add_option(
|
73 |
-
"-l",
|
74 |
-
"--local",
|
75 |
-
action="store_true",
|
76 |
-
default=False,
|
77 |
-
help=(
|
78 |
-
"If in a virtualenv that has global access, do not list "
|
79 |
-
"globally-installed packages."
|
80 |
-
),
|
81 |
-
)
|
82 |
-
self.cmd_opts.add_option(
|
83 |
-
"--user",
|
84 |
-
dest="user",
|
85 |
-
action="store_true",
|
86 |
-
default=False,
|
87 |
-
help="Only output packages installed in user-site.",
|
88 |
-
)
|
89 |
-
self.cmd_opts.add_option(cmdoptions.list_path())
|
90 |
-
self.cmd_opts.add_option(
|
91 |
-
"--pre",
|
92 |
-
action="store_true",
|
93 |
-
default=False,
|
94 |
-
help=(
|
95 |
-
"Include pre-release and development versions. By default, "
|
96 |
-
"pip only finds stable versions."
|
97 |
-
),
|
98 |
-
)
|
99 |
-
|
100 |
-
self.cmd_opts.add_option(
|
101 |
-
"--format",
|
102 |
-
action="store",
|
103 |
-
dest="list_format",
|
104 |
-
default="columns",
|
105 |
-
choices=("columns", "freeze", "json"),
|
106 |
-
help="Select the output format among: columns (default), freeze, or json",
|
107 |
-
)
|
108 |
-
|
109 |
-
self.cmd_opts.add_option(
|
110 |
-
"--not-required",
|
111 |
-
action="store_true",
|
112 |
-
dest="not_required",
|
113 |
-
help="List packages that are not dependencies of installed packages.",
|
114 |
-
)
|
115 |
-
|
116 |
-
self.cmd_opts.add_option(
|
117 |
-
"--exclude-editable",
|
118 |
-
action="store_false",
|
119 |
-
dest="include_editable",
|
120 |
-
help="Exclude editable package from output.",
|
121 |
-
)
|
122 |
-
self.cmd_opts.add_option(
|
123 |
-
"--include-editable",
|
124 |
-
action="store_true",
|
125 |
-
dest="include_editable",
|
126 |
-
help="Include editable package from output.",
|
127 |
-
default=True,
|
128 |
-
)
|
129 |
-
self.cmd_opts.add_option(cmdoptions.list_exclude())
|
130 |
-
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
|
131 |
-
|
132 |
-
self.parser.insert_option_group(0, index_opts)
|
133 |
-
self.parser.insert_option_group(0, self.cmd_opts)
|
134 |
-
|
135 |
-
def _build_package_finder(
|
136 |
-
self, options: Values, session: PipSession
|
137 |
-
) -> PackageFinder:
|
138 |
-
"""
|
139 |
-
Create a package finder appropriate to this list command.
|
140 |
-
"""
|
141 |
-
link_collector = LinkCollector.create(session, options=options)
|
142 |
-
|
143 |
-
# Pass allow_yanked=False to ignore yanked versions.
|
144 |
-
selection_prefs = SelectionPreferences(
|
145 |
-
allow_yanked=False,
|
146 |
-
allow_all_prereleases=options.pre,
|
147 |
-
)
|
148 |
-
|
149 |
-
return PackageFinder.create(
|
150 |
-
link_collector=link_collector,
|
151 |
-
selection_prefs=selection_prefs,
|
152 |
-
)
|
153 |
-
|
154 |
-
def run(self, options: Values, args: List[str]) -> int:
|
155 |
-
if options.outdated and options.uptodate:
|
156 |
-
raise CommandError("Options --outdated and --uptodate cannot be combined.")
|
157 |
-
|
158 |
-
if options.outdated and options.list_format == "freeze":
|
159 |
-
raise CommandError(
|
160 |
-
"List format 'freeze' can not be used with the --outdated option."
|
161 |
-
)
|
162 |
-
|
163 |
-
cmdoptions.check_list_path_option(options)
|
164 |
-
|
165 |
-
skip = set(stdlib_pkgs)
|
166 |
-
if options.excludes:
|
167 |
-
skip.update(canonicalize_name(n) for n in options.excludes)
|
168 |
-
|
169 |
-
packages: "_ProcessedDists" = [
|
170 |
-
cast("_DistWithLatestInfo", d)
|
171 |
-
for d in get_environment(options.path).iter_installed_distributions(
|
172 |
-
local_only=options.local,
|
173 |
-
user_only=options.user,
|
174 |
-
editables_only=options.editable,
|
175 |
-
include_editables=options.include_editable,
|
176 |
-
skip=skip,
|
177 |
-
)
|
178 |
-
]
|
179 |
-
|
180 |
-
# get_not_required must be called firstly in order to find and
|
181 |
-
# filter out all dependencies correctly. Otherwise a package
|
182 |
-
# can't be identified as requirement because some parent packages
|
183 |
-
# could be filtered out before.
|
184 |
-
if options.not_required:
|
185 |
-
packages = self.get_not_required(packages, options)
|
186 |
-
|
187 |
-
if options.outdated:
|
188 |
-
packages = self.get_outdated(packages, options)
|
189 |
-
elif options.uptodate:
|
190 |
-
packages = self.get_uptodate(packages, options)
|
191 |
-
|
192 |
-
self.output_package_listing(packages, options)
|
193 |
-
return SUCCESS
|
194 |
-
|
195 |
-
def get_outdated(
|
196 |
-
self, packages: "_ProcessedDists", options: Values
|
197 |
-
) -> "_ProcessedDists":
|
198 |
-
return [
|
199 |
-
dist
|
200 |
-
for dist in self.iter_packages_latest_infos(packages, options)
|
201 |
-
if dist.latest_version > dist.version
|
202 |
-
]
|
203 |
-
|
204 |
-
def get_uptodate(
|
205 |
-
self, packages: "_ProcessedDists", options: Values
|
206 |
-
) -> "_ProcessedDists":
|
207 |
-
return [
|
208 |
-
dist
|
209 |
-
for dist in self.iter_packages_latest_infos(packages, options)
|
210 |
-
if dist.latest_version == dist.version
|
211 |
-
]
|
212 |
-
|
213 |
-
def get_not_required(
|
214 |
-
self, packages: "_ProcessedDists", options: Values
|
215 |
-
) -> "_ProcessedDists":
|
216 |
-
dep_keys = {
|
217 |
-
canonicalize_name(dep.name)
|
218 |
-
for dist in packages
|
219 |
-
for dep in (dist.iter_dependencies() or ())
|
220 |
-
}
|
221 |
-
|
222 |
-
# Create a set to remove duplicate packages, and cast it to a list
|
223 |
-
# to keep the return type consistent with get_outdated and
|
224 |
-
# get_uptodate
|
225 |
-
return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys})
|
226 |
-
|
227 |
-
def iter_packages_latest_infos(
|
228 |
-
self, packages: "_ProcessedDists", options: Values
|
229 |
-
) -> Generator["_DistWithLatestInfo", None, None]:
|
230 |
-
with self._build_session(options) as session:
|
231 |
-
finder = self._build_package_finder(options, session)
|
232 |
-
|
233 |
-
def latest_info(
|
234 |
-
dist: "_DistWithLatestInfo",
|
235 |
-
) -> Optional["_DistWithLatestInfo"]:
|
236 |
-
all_candidates = finder.find_all_candidates(dist.canonical_name)
|
237 |
-
if not options.pre:
|
238 |
-
# Remove prereleases
|
239 |
-
all_candidates = [
|
240 |
-
candidate
|
241 |
-
for candidate in all_candidates
|
242 |
-
if not candidate.version.is_prerelease
|
243 |
-
]
|
244 |
-
|
245 |
-
evaluator = finder.make_candidate_evaluator(
|
246 |
-
project_name=dist.canonical_name,
|
247 |
-
)
|
248 |
-
best_candidate = evaluator.sort_best_candidate(all_candidates)
|
249 |
-
if best_candidate is None:
|
250 |
-
return None
|
251 |
-
|
252 |
-
remote_version = best_candidate.version
|
253 |
-
if best_candidate.link.is_wheel:
|
254 |
-
typ = "wheel"
|
255 |
-
else:
|
256 |
-
typ = "sdist"
|
257 |
-
dist.latest_version = remote_version
|
258 |
-
dist.latest_filetype = typ
|
259 |
-
return dist
|
260 |
-
|
261 |
-
for dist in map(latest_info, packages):
|
262 |
-
if dist is not None:
|
263 |
-
yield dist
|
264 |
-
|
265 |
-
def output_package_listing(
|
266 |
-
self, packages: "_ProcessedDists", options: Values
|
267 |
-
) -> None:
|
268 |
-
packages = sorted(
|
269 |
-
packages,
|
270 |
-
key=lambda dist: dist.canonical_name,
|
271 |
-
)
|
272 |
-
if options.list_format == "columns" and packages:
|
273 |
-
data, header = format_for_columns(packages, options)
|
274 |
-
self.output_package_listing_columns(data, header)
|
275 |
-
elif options.list_format == "freeze":
|
276 |
-
for dist in packages:
|
277 |
-
if options.verbose >= 1:
|
278 |
-
write_output(
|
279 |
-
"%s==%s (%s)", dist.raw_name, dist.version, dist.location
|
280 |
-
)
|
281 |
-
else:
|
282 |
-
write_output("%s==%s", dist.raw_name, dist.version)
|
283 |
-
elif options.list_format == "json":
|
284 |
-
write_output(format_for_json(packages, options))
|
285 |
-
|
286 |
-
def output_package_listing_columns(
|
287 |
-
self, data: List[List[str]], header: List[str]
|
288 |
-
) -> None:
|
289 |
-
# insert the header first: we need to know the size of column names
|
290 |
-
if len(data) > 0:
|
291 |
-
data.insert(0, header)
|
292 |
-
|
293 |
-
pkg_strings, sizes = tabulate(data)
|
294 |
-
|
295 |
-
# Create and add a separator.
|
296 |
-
if len(data) > 0:
|
297 |
-
pkg_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes)))
|
298 |
-
|
299 |
-
for val in pkg_strings:
|
300 |
-
write_output(val)
|
301 |
-
|
302 |
-
|
303 |
-
def format_for_columns(
|
304 |
-
pkgs: "_ProcessedDists", options: Values
|
305 |
-
) -> Tuple[List[List[str]], List[str]]:
|
306 |
-
"""
|
307 |
-
Convert the package data into something usable
|
308 |
-
by output_package_listing_columns.
|
309 |
-
"""
|
310 |
-
header = ["Package", "Version"]
|
311 |
-
|
312 |
-
running_outdated = options.outdated
|
313 |
-
if running_outdated:
|
314 |
-
header.extend(["Latest", "Type"])
|
315 |
-
|
316 |
-
has_editables = any(x.editable for x in pkgs)
|
317 |
-
if has_editables:
|
318 |
-
header.append("Editable project location")
|
319 |
-
|
320 |
-
if options.verbose >= 1:
|
321 |
-
header.append("Location")
|
322 |
-
if options.verbose >= 1:
|
323 |
-
header.append("Installer")
|
324 |
-
|
325 |
-
data = []
|
326 |
-
for proj in pkgs:
|
327 |
-
# if we're working on the 'outdated' list, separate out the
|
328 |
-
# latest_version and type
|
329 |
-
row = [proj.raw_name, str(proj.version)]
|
330 |
-
|
331 |
-
if running_outdated:
|
332 |
-
row.append(str(proj.latest_version))
|
333 |
-
row.append(proj.latest_filetype)
|
334 |
-
|
335 |
-
if has_editables:
|
336 |
-
row.append(proj.editable_project_location or "")
|
337 |
-
|
338 |
-
if options.verbose >= 1:
|
339 |
-
row.append(proj.location or "")
|
340 |
-
if options.verbose >= 1:
|
341 |
-
row.append(proj.installer)
|
342 |
-
|
343 |
-
data.append(row)
|
344 |
-
|
345 |
-
return data, header
|
346 |
-
|
347 |
-
|
348 |
-
def format_for_json(packages: "_ProcessedDists", options: Values) -> str:
|
349 |
-
data = []
|
350 |
-
for dist in packages:
|
351 |
-
info = {
|
352 |
-
"name": dist.raw_name,
|
353 |
-
"version": str(dist.version),
|
354 |
-
}
|
355 |
-
if options.verbose >= 1:
|
356 |
-
info["location"] = dist.location or ""
|
357 |
-
info["installer"] = dist.installer
|
358 |
-
if options.outdated:
|
359 |
-
info["latest_version"] = str(dist.latest_version)
|
360 |
-
info["latest_filetype"] = dist.latest_filetype
|
361 |
-
editable_project_location = dist.editable_project_location
|
362 |
-
if editable_project_location:
|
363 |
-
info["editable_project_location"] = editable_project_location
|
364 |
-
data.append(info)
|
365 |
-
return json.dumps(data)
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/metadata/importlib/__init__.py
DELETED
@@ -1,4 +0,0 @@
-from ._dists import Distribution
-from ._envs import Environment
-
-__all__ = ["Distribution", "Environment"]
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/win32.py
DELETED
@@ -1,180 +0,0 @@
-# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
-
-# from winbase.h
-STDOUT = -11
-STDERR = -12
-
-ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
-
-try:
-    import ctypes
-    from ctypes import LibraryLoader
-    windll = LibraryLoader(ctypes.WinDLL)
-    from ctypes import wintypes
-except (AttributeError, ImportError):
-    windll = None
-    SetConsoleTextAttribute = lambda *_: None
-    winapi_test = lambda *_: None
-else:
-    from ctypes import byref, Structure, c_char, POINTER
-
-    COORD = wintypes._COORD
-
-    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
-        """struct in wincon.h."""
-        _fields_ = [
-            ("dwSize", COORD),
-            ("dwCursorPosition", COORD),
-            ("wAttributes", wintypes.WORD),
-            ("srWindow", wintypes.SMALL_RECT),
-            ("dwMaximumWindowSize", COORD),
-        ]
-        def __str__(self):
-            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
-                self.dwSize.Y, self.dwSize.X
-                , self.dwCursorPosition.Y, self.dwCursorPosition.X
-                , self.wAttributes
-                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
-                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
-            )
-
-    _GetStdHandle = windll.kernel32.GetStdHandle
-    _GetStdHandle.argtypes = [
-        wintypes.DWORD,
-    ]
-    _GetStdHandle.restype = wintypes.HANDLE
-
-    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
-    _GetConsoleScreenBufferInfo.argtypes = [
-        wintypes.HANDLE,
-        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
-    ]
-    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
-
-    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
-    _SetConsoleTextAttribute.argtypes = [
-        wintypes.HANDLE,
-        wintypes.WORD,
-    ]
-    _SetConsoleTextAttribute.restype = wintypes.BOOL
-
-    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
-    _SetConsoleCursorPosition.argtypes = [
-        wintypes.HANDLE,
-        COORD,
-    ]
-    _SetConsoleCursorPosition.restype = wintypes.BOOL
-
-    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
-    _FillConsoleOutputCharacterA.argtypes = [
-        wintypes.HANDLE,
-        c_char,
-        wintypes.DWORD,
-        COORD,
-        POINTER(wintypes.DWORD),
-    ]
-    _FillConsoleOutputCharacterA.restype = wintypes.BOOL
-
-    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
-    _FillConsoleOutputAttribute.argtypes = [
-        wintypes.HANDLE,
-        wintypes.WORD,
-        wintypes.DWORD,
-        COORD,
-        POINTER(wintypes.DWORD),
-    ]
-    _FillConsoleOutputAttribute.restype = wintypes.BOOL
-
-    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
-    _SetConsoleTitleW.argtypes = [
-        wintypes.LPCWSTR
-    ]
-    _SetConsoleTitleW.restype = wintypes.BOOL
-
-    _GetConsoleMode = windll.kernel32.GetConsoleMode
-    _GetConsoleMode.argtypes = [
-        wintypes.HANDLE,
-        POINTER(wintypes.DWORD)
-    ]
-    _GetConsoleMode.restype = wintypes.BOOL
-
-    _SetConsoleMode = windll.kernel32.SetConsoleMode
-    _SetConsoleMode.argtypes = [
-        wintypes.HANDLE,
-        wintypes.DWORD
-    ]
-    _SetConsoleMode.restype = wintypes.BOOL
-
-    def _winapi_test(handle):
-        csbi = CONSOLE_SCREEN_BUFFER_INFO()
-        success = _GetConsoleScreenBufferInfo(
-            handle, byref(csbi))
-        return bool(success)
-
-    def winapi_test():
-        return any(_winapi_test(h) for h in
-                   (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
-
-    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
-        handle = _GetStdHandle(stream_id)
-        csbi = CONSOLE_SCREEN_BUFFER_INFO()
-        success = _GetConsoleScreenBufferInfo(
-            handle, byref(csbi))
-        return csbi
-
-    def SetConsoleTextAttribute(stream_id, attrs):
-        handle = _GetStdHandle(stream_id)
-        return _SetConsoleTextAttribute(handle, attrs)
-
-    def SetConsoleCursorPosition(stream_id, position, adjust=True):
-        position = COORD(*position)
-        # If the position is out of range, do nothing.
-        if position.Y <= 0 or position.X <= 0:
-            return
-        # Adjust for Windows' SetConsoleCursorPosition:
-        #    1. being 0-based, while ANSI is 1-based.
-        #    2. expecting (x,y), while ANSI uses (y,x).
-        adjusted_position = COORD(position.Y - 1, position.X - 1)
-        if adjust:
-            # Adjust for viewport's scroll position
-            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
-            adjusted_position.Y += sr.Top
-            adjusted_position.X += sr.Left
-        # Resume normal processing
-        handle = _GetStdHandle(stream_id)
-        return _SetConsoleCursorPosition(handle, adjusted_position)
-
-    def FillConsoleOutputCharacter(stream_id, char, length, start):
-        handle = _GetStdHandle(stream_id)
-        char = c_char(char.encode())
-        length = wintypes.DWORD(length)
-        num_written = wintypes.DWORD(0)
-        # Note that this is hard-coded for ANSI (vs wide) bytes.
-        success = _FillConsoleOutputCharacterA(
-            handle, char, length, start, byref(num_written))
-        return num_written.value
-
-    def FillConsoleOutputAttribute(stream_id, attr, length, start):
-        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
-        handle = _GetStdHandle(stream_id)
-        attribute = wintypes.WORD(attr)
-        length = wintypes.DWORD(length)
-        num_written = wintypes.DWORD(0)
-        # Note that this is hard-coded for ANSI (vs wide) bytes.
-        return _FillConsoleOutputAttribute(
-            handle, attribute, length, start, byref(num_written))
-
-    def SetConsoleTitle(title):
-        return _SetConsoleTitleW(title)
-
-    def GetConsoleMode(handle):
-        mode = wintypes.DWORD()
-        success = _GetConsoleMode(handle, byref(mode))
-        if not success:
-            raise ctypes.WinError()
-        return mode.value
-
-    def SetConsoleMode(handle, mode):
-        success = _SetConsoleMode(handle, mode)
-        if not success:
-            raise ctypes.WinError()
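For reference on the `win32.py` module above (the same code ships in the standalone colorama package): a minimal, Windows-only usage sketch, assuming colorama is installed and the ctypes branch was taken. The 0x0008 intensity bit is an assumed illustrative value, not something defined in this module.

from colorama.win32 import (
    STDOUT, winapi_test, GetConsoleScreenBufferInfo, SetConsoleTextAttribute,
)

if winapi_test():  # True when GetConsoleScreenBufferInfo succeeds on stdout or stderr
    csbi = GetConsoleScreenBufferInfo(STDOUT)
    original_attrs = csbi.wAttributes            # remember the current console colors
    SetConsoleTextAttribute(STDOUT, original_attrs | 0x0008)  # set the intensity bit
    print("bright text")
    SetConsoleTextAttribute(STDOUT, original_attrs)           # restore the old attributes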
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/archive_util.py
DELETED
@@ -1,213 +0,0 @@
|
|
1 |
-
"""Utilities for extracting common archive formats"""
|
2 |
-
|
3 |
-
import zipfile
|
4 |
-
import tarfile
|
5 |
-
import os
|
6 |
-
import shutil
|
7 |
-
import posixpath
|
8 |
-
import contextlib
|
9 |
-
from distutils.errors import DistutilsError
|
10 |
-
|
11 |
-
from ._path import ensure_directory
|
12 |
-
|
13 |
-
__all__ = [
|
14 |
-
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
|
15 |
-
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
|
16 |
-
]
|
17 |
-
|
18 |
-
|
19 |
-
class UnrecognizedFormat(DistutilsError):
|
20 |
-
"""Couldn't recognize the archive type"""
|
21 |
-
|
22 |
-
|
23 |
-
def default_filter(src, dst):
|
24 |
-
"""The default progress/filter callback; returns True for all files"""
|
25 |
-
return dst
|
26 |
-
|
27 |
-
|
28 |
-
def unpack_archive(
|
29 |
-
filename, extract_dir, progress_filter=default_filter,
|
30 |
-
drivers=None):
|
31 |
-
"""Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
|
32 |
-
|
33 |
-
`progress_filter` is a function taking two arguments: a source path
|
34 |
-
internal to the archive ('/'-separated), and a filesystem path where it
|
35 |
-
will be extracted. The callback must return the desired extract path
|
36 |
-
(which may be the same as the one passed in), or else ``None`` to skip
|
37 |
-
that file or directory. The callback can thus be used to report on the
|
38 |
-
progress of the extraction, as well as to filter the items extracted or
|
39 |
-
alter their extraction paths.
|
40 |
-
|
41 |
-
`drivers`, if supplied, must be a non-empty sequence of functions with the
|
42 |
-
same signature as this function (minus the `drivers` argument), that raise
|
43 |
-
``UnrecognizedFormat`` if they do not support extracting the designated
|
44 |
-
archive type. The `drivers` are tried in sequence until one is found that
|
45 |
-
does not raise an error, or until all are exhausted (in which case
|
46 |
-
``UnrecognizedFormat`` is raised). If you do not supply a sequence of
|
47 |
-
drivers, the module's ``extraction_drivers`` constant will be used, which
|
48 |
-
means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
|
49 |
-
order.
|
50 |
-
"""
|
51 |
-
for driver in drivers or extraction_drivers:
|
52 |
-
try:
|
53 |
-
driver(filename, extract_dir, progress_filter)
|
54 |
-
except UnrecognizedFormat:
|
55 |
-
continue
|
56 |
-
else:
|
57 |
-
return
|
58 |
-
else:
|
59 |
-
raise UnrecognizedFormat(
|
60 |
-
"Not a recognized archive type: %s" % filename
|
61 |
-
)
|
62 |
-
|
63 |
-
|
64 |
-
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
|
65 |
-
""""Unpack" a directory, using the same interface as for archives
|
66 |
-
|
67 |
-
Raises ``UnrecognizedFormat`` if `filename` is not a directory
|
68 |
-
"""
|
69 |
-
if not os.path.isdir(filename):
|
70 |
-
raise UnrecognizedFormat("%s is not a directory" % filename)
|
71 |
-
|
72 |
-
paths = {
|
73 |
-
filename: ('', extract_dir),
|
74 |
-
}
|
75 |
-
for base, dirs, files in os.walk(filename):
|
76 |
-
src, dst = paths[base]
|
77 |
-
for d in dirs:
|
78 |
-
paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
|
79 |
-
for f in files:
|
80 |
-
target = os.path.join(dst, f)
|
81 |
-
target = progress_filter(src + f, target)
|
82 |
-
if not target:
|
83 |
-
# skip non-files
|
84 |
-
continue
|
85 |
-
ensure_directory(target)
|
86 |
-
f = os.path.join(base, f)
|
87 |
-
shutil.copyfile(f, target)
|
88 |
-
shutil.copystat(f, target)
|
89 |
-
|
90 |
-
|
91 |
-
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
|
92 |
-
"""Unpack zip `filename` to `extract_dir`
|
93 |
-
|
94 |
-
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
|
95 |
-
by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
|
96 |
-
of the `progress_filter` argument.
|
97 |
-
"""
|
98 |
-
|
99 |
-
if not zipfile.is_zipfile(filename):
|
100 |
-
raise UnrecognizedFormat("%s is not a zip file" % (filename,))
|
101 |
-
|
102 |
-
with zipfile.ZipFile(filename) as z:
|
103 |
-
_unpack_zipfile_obj(z, extract_dir, progress_filter)
|
104 |
-
|
105 |
-
|
106 |
-
def _unpack_zipfile_obj(zipfile_obj, extract_dir, progress_filter=default_filter):
|
107 |
-
"""Internal/private API used by other parts of setuptools.
|
108 |
-
Similar to ``unpack_zipfile``, but receives an already opened :obj:`zipfile.ZipFile`
|
109 |
-
object instead of a filename.
|
110 |
-
"""
|
111 |
-
for info in zipfile_obj.infolist():
|
112 |
-
name = info.filename
|
113 |
-
|
114 |
-
# don't extract absolute paths or ones with .. in them
|
115 |
-
if name.startswith('/') or '..' in name.split('/'):
|
116 |
-
continue
|
117 |
-
|
118 |
-
target = os.path.join(extract_dir, *name.split('/'))
|
119 |
-
target = progress_filter(name, target)
|
120 |
-
if not target:
|
121 |
-
continue
|
122 |
-
if name.endswith('/'):
|
123 |
-
# directory
|
124 |
-
ensure_directory(target)
|
125 |
-
else:
|
126 |
-
# file
|
127 |
-
ensure_directory(target)
|
128 |
-
data = zipfile_obj.read(info.filename)
|
129 |
-
with open(target, 'wb') as f:
|
130 |
-
f.write(data)
|
131 |
-
unix_attributes = info.external_attr >> 16
|
132 |
-
if unix_attributes:
|
133 |
-
os.chmod(target, unix_attributes)
|
134 |
-
|
135 |
-
|
136 |
-
def _resolve_tar_file_or_dir(tar_obj, tar_member_obj):
|
137 |
-
"""Resolve any links and extract link targets as normal files."""
|
138 |
-
while tar_member_obj is not None and (
|
139 |
-
tar_member_obj.islnk() or tar_member_obj.issym()):
|
140 |
-
linkpath = tar_member_obj.linkname
|
141 |
-
if tar_member_obj.issym():
|
142 |
-
base = posixpath.dirname(tar_member_obj.name)
|
143 |
-
linkpath = posixpath.join(base, linkpath)
|
144 |
-
linkpath = posixpath.normpath(linkpath)
|
145 |
-
tar_member_obj = tar_obj._getmember(linkpath)
|
146 |
-
|
147 |
-
is_file_or_dir = (
|
148 |
-
tar_member_obj is not None and
|
149 |
-
(tar_member_obj.isfile() or tar_member_obj.isdir())
|
150 |
-
)
|
151 |
-
if is_file_or_dir:
|
152 |
-
return tar_member_obj
|
153 |
-
|
154 |
-
raise LookupError('Got unknown file type')
|
155 |
-
|
156 |
-
|
157 |
-
def _iter_open_tar(tar_obj, extract_dir, progress_filter):
|
158 |
-
"""Emit member-destination pairs from a tar archive."""
|
159 |
-
# don't do any chowning!
|
160 |
-
tar_obj.chown = lambda *args: None
|
161 |
-
|
162 |
-
with contextlib.closing(tar_obj):
|
163 |
-
for member in tar_obj:
|
164 |
-
name = member.name
|
165 |
-
# don't extract absolute paths or ones with .. in them
|
166 |
-
if name.startswith('/') or '..' in name.split('/'):
|
167 |
-
continue
|
168 |
-
|
169 |
-
prelim_dst = os.path.join(extract_dir, *name.split('/'))
|
170 |
-
|
171 |
-
try:
|
172 |
-
member = _resolve_tar_file_or_dir(tar_obj, member)
|
173 |
-
except LookupError:
|
174 |
-
continue
|
175 |
-
|
176 |
-
final_dst = progress_filter(name, prelim_dst)
|
177 |
-
if not final_dst:
|
178 |
-
continue
|
179 |
-
|
180 |
-
if final_dst.endswith(os.sep):
|
181 |
-
final_dst = final_dst[:-1]
|
182 |
-
|
183 |
-
yield member, final_dst
|
184 |
-
|
185 |
-
|
186 |
-
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
|
187 |
-
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
|
188 |
-
|
189 |
-
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
|
190 |
-
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
|
191 |
-
of the `progress_filter` argument.
|
192 |
-
"""
|
193 |
-
try:
|
194 |
-
tarobj = tarfile.open(filename)
|
195 |
-
except tarfile.TarError as e:
|
196 |
-
raise UnrecognizedFormat(
|
197 |
-
"%s is not a compressed or uncompressed tar file" % (filename,)
|
198 |
-
) from e
|
199 |
-
|
200 |
-
for member, final_dst in _iter_open_tar(
|
201 |
-
tarobj, extract_dir, progress_filter,
|
202 |
-
):
|
203 |
-
try:
|
204 |
-
# XXX Ugh
|
205 |
-
tarobj._extract_member(member, final_dst)
|
206 |
-
except tarfile.ExtractError:
|
207 |
-
# chown/chmod/mkfifo/mknode/makedev failed
|
208 |
-
pass
|
209 |
-
|
210 |
-
return True
|
211 |
-
|
212 |
-
|
213 |
-
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
|
spaces/BAAI/AltDiffusion-m9/share_btn.py
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
|
2 |
-
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
|
3 |
-
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
|
4 |
-
</svg>"""
|
5 |
-
|
6 |
-
loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
|
7 |
-
style="color: #ffffff;
|
8 |
-
"
|
9 |
-
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
|
10 |
-
|
11 |
-
share_js = """async () => {
|
12 |
-
async function uploadFile(file){
|
13 |
-
const UPLOAD_URL = 'https://huggingface.co/uploads';
|
14 |
-
const response = await fetch(UPLOAD_URL, {
|
15 |
-
method: 'POST',
|
16 |
-
headers: {
|
17 |
-
'Content-Type': file.type,
|
18 |
-
'X-Requested-With': 'XMLHttpRequest',
|
19 |
-
},
|
20 |
-
body: file, /// <- File inherits from Blob
|
21 |
-
});
|
22 |
-
const url = await response.text();
|
23 |
-
return url;
|
24 |
-
}
|
25 |
-
const gradioEl = document.querySelector('body > gradio-app');
|
26 |
-
const imgEls = gradioEl.querySelectorAll('#gallery img');
|
27 |
-
const promptTxt = gradioEl.querySelector('#prompt-text-input input').value;
|
28 |
-
const shareBtnEl = gradioEl.querySelector('#share-btn');
|
29 |
-
const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
|
30 |
-
const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
|
31 |
-
if(!imgEls.length){
|
32 |
-
return;
|
33 |
-
};
|
34 |
-
shareBtnEl.style.pointerEvents = 'none';
|
35 |
-
shareIconEl.style.display = 'none';
|
36 |
-
loadingIconEl.style.removeProperty('display');
|
37 |
-
const files = await Promise.all(
|
38 |
-
[...imgEls].map(async (imgEl) => {
|
39 |
-
const res = await fetch(imgEl.src);
|
40 |
-
const blob = await res.blob();
|
41 |
-
const imgId = Date.now() % 200;
|
42 |
-
const fileName = `diffuse-the-rest-${{imgId}}.png`;
|
43 |
-
return new File([blob], fileName, { type: 'image/png' });
|
44 |
-
})
|
45 |
-
);
|
46 |
-
const urls = await Promise.all(files.map((f) => uploadFile(f)));
|
47 |
-
const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`);
|
48 |
-
const descriptionMd = `<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'>
|
49 |
-
${htmlImgs.join(`\n`)}
|
50 |
-
</div>`;
|
51 |
-
const params = new URLSearchParams({
|
52 |
-
title: promptTxt,
|
53 |
-
description: descriptionMd,
|
54 |
-
});
|
55 |
-
const paramsStr = params.toString();
|
56 |
-
window.open(`https://huggingface.co/spaces/BAAI/bilingual_stable_diffusion/discussions/new?${paramsStr}`, '_blank');
|
57 |
-
shareBtnEl.style.removeProperty('pointer-events');
|
58 |
-
shareIconEl.style.removeProperty('display');
|
59 |
-
loadingIconEl.style.display = 'none';
|
60 |
-
}"""
|
spaces/Bart92/RVC_HF/infer/modules/onnx/export.py
DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from infer.lib.infer_pack.models_onnx import SynthesizerTrnMsNSFsidM
-
-
-def export_onnx(ModelPath, ExportedPath):
-    cpt = torch.load(ModelPath, map_location="cpu")
-    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]
-    vec_channels = 256 if cpt.get("version", "v1") == "v1" else 768
-
-    test_phone = torch.rand(1, 200, vec_channels)  # hidden unit
-    test_phone_lengths = torch.tensor([200]).long()  # hidden unit length (apparently unused)
-    test_pitch = torch.randint(size=(1, 200), low=5, high=255)  # fundamental frequency (in Hz)
-    test_pitchf = torch.rand(1, 200)  # nsf fundamental frequency
-    test_ds = torch.LongTensor([0])  # speaker ID
-    test_rnd = torch.rand(1, 192, 200)  # noise (adds a random factor)
-
-    device = "cpu"  # device used at export time (does not affect later use of the model)
-
-    net_g = SynthesizerTrnMsNSFsidM(
-        *cpt["config"], is_half=False, version=cpt.get("version", "v1")
-    )  # fp32 export (supporting fp16 in C++ would require manually rearranging memory, so fp16 is not used for now)
-    net_g.load_state_dict(cpt["weight"], strict=False)
-    input_names = ["phone", "phone_lengths", "pitch", "pitchf", "ds", "rnd"]
-    output_names = [
-        "audio",
-    ]
-    # net_g.construct_spkmixmap(n_speaker)  export of a multi-speaker mix track
-    torch.onnx.export(
-        net_g,
-        (
-            test_phone.to(device),
-            test_phone_lengths.to(device),
-            test_pitch.to(device),
-            test_pitchf.to(device),
-            test_ds.to(device),
-            test_rnd.to(device),
-        ),
-        ExportedPath,
-        dynamic_axes={
-            "phone": [1],
-            "pitch": [1],
-            "pitchf": [1],
-            "rnd": [2],
-        },
-        do_constant_folding=False,
-        opset_version=13,
-        verbose=False,
-        input_names=input_names,
-        output_names=output_names,
-    )
-    return "Finished"
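For reference on `export_onnx` above: a hypothetical invocation, assuming the RVC repository root is on sys.path and a trained checkpoint exists at the path shown (both paths below are made up).

from infer.modules.onnx.export import export_onnx

# Load the .pth checkpoint and write an ONNX graph next to it.
status = export_onnx("weights/my_voice.pth", "weights/my_voice.onnx")
print(status)  # "Finished" on success; the exported graph keeps the time axes of
               # "phone", "pitch", "pitchf" and "rnd" dynamic, as declared in dynamic_axes.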
spaces/BetterAPI/BetterChat_new/src/lib/types/Message.ts
DELETED
@@ -1,5 +0,0 @@
-export interface Message {
-	from: "user" | "assistant";
-	id: ReturnType<typeof crypto.randomUUID>;
-	content: string;
-}
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py
DELETED
@@ -1,49 +0,0 @@
-from __future__ import absolute_import
-
-# For backwards compatibility, provide imports that used to be here.
-from .connection import is_connection_dropped
-from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
-from .response import is_fp_closed
-from .retry import Retry
-from .ssl_ import (
-    ALPN_PROTOCOLS,
-    HAS_SNI,
-    IS_PYOPENSSL,
-    IS_SECURETRANSPORT,
-    PROTOCOL_TLS,
-    SSLContext,
-    assert_fingerprint,
-    resolve_cert_reqs,
-    resolve_ssl_version,
-    ssl_wrap_socket,
-)
-from .timeout import Timeout, current_time
-from .url import Url, get_host, parse_url, split_first
-from .wait import wait_for_read, wait_for_write
-
-__all__ = (
-    "HAS_SNI",
-    "IS_PYOPENSSL",
-    "IS_SECURETRANSPORT",
-    "SSLContext",
-    "PROTOCOL_TLS",
-    "ALPN_PROTOCOLS",
-    "Retry",
-    "Timeout",
-    "Url",
-    "assert_fingerprint",
-    "current_time",
-    "is_connection_dropped",
-    "is_fp_closed",
-    "get_host",
-    "parse_url",
-    "make_headers",
-    "resolve_cert_reqs",
-    "resolve_ssl_version",
-    "split_first",
-    "ssl_wrap_socket",
-    "wait_for_read",
-    "wait_for_write",
-    "SKIP_HEADER",
-    "SKIPPABLE_HEADERS",
-)
spaces/BigChungux/Pet_Survey/app.py
DELETED
@@ -1,172 +0,0 @@
|
|
1 |
-
### ----------------------------- ###
|
2 |
-
### libraries ###
|
3 |
-
### ----------------------------- ###
|
4 |
-
|
5 |
-
import gradio as gr
|
6 |
-
import pandas as pd
|
7 |
-
import numpy as np
|
8 |
-
from sklearn.model_selection import train_test_split
|
9 |
-
from sklearn.linear_model import LogisticRegression
|
10 |
-
from sklearn import metrics
|
11 |
-
|
12 |
-
|
13 |
-
### ------------------------------ ###
|
14 |
-
### data transformation ###
|
15 |
-
### ------------------------------ ###
|
16 |
-
|
17 |
-
# load dataset
|
18 |
-
uncleaned_data = pd.read_csv('data.csv')
|
19 |
-
|
20 |
-
# remove timestamp from dataset (always first column)
|
21 |
-
uncleaned_data = uncleaned_data.iloc[: , 1:]
|
22 |
-
data = pd.DataFrame()
|
23 |
-
|
24 |
-
# keep track of which columns are categorical and what
|
25 |
-
# those columns' value mappings are
|
26 |
-
# structure: {colname1: {...}, colname2: {...} }
|
27 |
-
cat_value_dicts = {}
|
28 |
-
final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1]
|
29 |
-
|
30 |
-
# for each column...
|
31 |
-
for (colname, colval) in uncleaned_data.iteritems():
|
32 |
-
|
33 |
-
# check if col is already a number; if so, add col directly
|
34 |
-
# to new dataframe and skip to next column
|
35 |
-
if isinstance(colval.values[0], (np.integer, float)):
|
36 |
-
data[colname] = uncleaned_data[colname].copy()
|
37 |
-
continue
|
38 |
-
|
39 |
-
# structure: {0: "lilac", 1: "blue", ...}
|
40 |
-
new_dict = {}
|
41 |
-
val = 0 # first index per column
|
42 |
-
transformed_col_vals = [] # new numeric datapoints
|
43 |
-
|
44 |
-
# if not, for each item in that column...
|
45 |
-
for (row, item) in enumerate(colval.values):
|
46 |
-
|
47 |
-
# if item is not in this col's dict...
|
48 |
-
if item not in new_dict:
|
49 |
-
new_dict[item] = val
|
50 |
-
val += 1
|
51 |
-
|
52 |
-
# then add numerical value to transformed dataframe
|
53 |
-
transformed_col_vals.append(new_dict[item])
|
54 |
-
|
55 |
-
# reverse dictionary only for final col (0, 1) => (vals)
|
56 |
-
if colname == final_colname:
|
57 |
-
new_dict = {value : key for (key, value) in new_dict.items()}
|
58 |
-
|
59 |
-
cat_value_dicts[colname] = new_dict
|
60 |
-
data[colname] = transformed_col_vals
|
61 |
-
|
62 |
-
|
63 |
-
### -------------------------------- ###
|
64 |
-
### model training ###
|
65 |
-
### -------------------------------- ###
|
66 |
-
|
67 |
-
# select features and predicton; automatically selects last column as prediction
|
68 |
-
cols = len(data.columns)
|
69 |
-
num_features = cols - 1
|
70 |
-
x = data.iloc[: , :num_features]
|
71 |
-
y = data.iloc[: , num_features:]
|
72 |
-
|
73 |
-
# split data into training and testing sets
|
74 |
-
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
|
75 |
-
|
76 |
-
# instantiate the model (using default parameters)
|
77 |
-
model = LogisticRegression()
|
78 |
-
model.fit(x_train, y_train.values.ravel())
|
79 |
-
y_pred = model.predict(x_test)
|
80 |
-
|
81 |
-
|
82 |
-
### -------------------------------- ###
|
83 |
-
### article generation ###
|
84 |
-
### -------------------------------- ###
|
85 |
-
# borrow file reading function from reader.py
|
86 |
-
|
87 |
-
def get_feat():
|
88 |
-
feats = [abs(x) for x in model.coef_[0]]
|
89 |
-
max_val = max(feats)
|
90 |
-
idx = feats.index(max_val)
|
91 |
-
return data.columns[idx]
|
92 |
-
|
93 |
-
acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%"
|
94 |
-
most_imp_feat = get_feat()
|
95 |
-
# info = get_article(acc, most_imp_feat)
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
### ------------------------------- ###
|
100 |
-
### interface creation ###
|
101 |
-
### ------------------------------- ###
|
102 |
-
|
103 |
-
|
104 |
-
# predictor for generic number of features
|
105 |
-
def general_predictor(*args):
|
106 |
-
features = []
|
107 |
-
|
108 |
-
# transform categorical input
|
109 |
-
for colname, arg in zip(data.columns, args):
|
110 |
-
if (colname in cat_value_dicts):
|
111 |
-
features.append(cat_value_dicts[colname][arg])
|
112 |
-
else:
|
113 |
-
features.append(arg)
|
114 |
-
|
115 |
-
# predict single datapoint
|
116 |
-
new_input = [features]
|
117 |
-
result = model.predict(new_input)
|
118 |
-
return cat_value_dicts[final_colname][result[0]]
|
119 |
-
|
120 |
-
# add data labels to replace those lost via star-args
|
121 |
-
|
122 |
-
|
123 |
-
block = gr.Blocks()
|
124 |
-
|
125 |
-
with open('info.md') as f:
|
126 |
-
with block:
|
127 |
-
gr.Markdown(f.readline())
|
128 |
-
gr.Markdown('Take the quiz to get a personalized recommendation using AI.')
|
129 |
-
|
130 |
-
with gr.Row():
|
131 |
-
with gr.Box():
|
132 |
-
inputls = []
|
133 |
-
for colname in data.columns:
|
134 |
-
# skip last column
|
135 |
-
if colname == final_colname:
|
136 |
-
continue
|
137 |
-
|
138 |
-
# access categories dict if data is categorical
|
139 |
-
# otherwise, just use a number input
|
140 |
-
if colname in cat_value_dicts:
|
141 |
-
radio_options = list(cat_value_dicts[colname].keys())
|
142 |
-
inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname))
|
143 |
-
else:
|
144 |
-
# add numerical input
|
145 |
-
inputls.append(gr.inputs.Number(label=colname))
|
146 |
-
gr.Markdown("<br />")
|
147 |
-
|
148 |
-
submit = gr.Button("Click to see your personalized result!", variant="primary")
|
149 |
-
gr.Markdown("<br />")
|
150 |
-
output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here")
|
151 |
-
|
152 |
-
submit.click(fn=general_predictor, inputs=inputls, outputs=output)
|
153 |
-
gr.Markdown("<br />")
|
154 |
-
|
155 |
-
with gr.Row():
|
156 |
-
with gr.Box():
|
157 |
-
gr.Markdown(f"<h3>Accuracy: </h3>{acc}")
|
158 |
-
with gr.Box():
|
159 |
-
gr.Markdown(f"<h3>Most important feature: </h3>{most_imp_feat}")
|
160 |
-
|
161 |
-
gr.Markdown("<br />")
|
162 |
-
|
163 |
-
with gr.Box():
|
164 |
-
gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for <em>that dataset</em>. Model accuracy and most important feature can be helpful for understanding how the model works, but <em>should not be considered absolute facts about the real world</em>.''')
|
165 |
-
|
166 |
-
with gr.Box():
|
167 |
-
with open('info.md') as f:
|
168 |
-
f.readline()
|
169 |
-
gr.Markdown(f.read())
|
170 |
-
|
171 |
-
# show the interface
|
172 |
-
block.launch()
|
spaces/Boadiwaa/Recipes/openai/api_requestor.py
DELETED
@@ -1,365 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import platform
|
3 |
-
import threading
|
4 |
-
import warnings
|
5 |
-
from email import header
|
6 |
-
from json import JSONDecodeError
|
7 |
-
from typing import Dict, Iterator, Optional, Tuple, Union
|
8 |
-
from urllib.parse import urlencode, urlsplit, urlunsplit
|
9 |
-
|
10 |
-
import requests
|
11 |
-
|
12 |
-
import openai
|
13 |
-
from openai import error, util, version
|
14 |
-
from openai.openai_response import OpenAIResponse
|
15 |
-
from openai.util import ApiType
|
16 |
-
|
17 |
-
TIMEOUT_SECS = 600
|
18 |
-
MAX_CONNECTION_RETRIES = 2
|
19 |
-
|
20 |
-
# Has one attribute per thread, 'session'.
|
21 |
-
_thread_context = threading.local()
|
22 |
-
|
23 |
-
|
24 |
-
def _build_api_url(url, query):
|
25 |
-
scheme, netloc, path, base_query, fragment = urlsplit(url)
|
26 |
-
|
27 |
-
if base_query:
|
28 |
-
query = "%s&%s" % (base_query, query)
|
29 |
-
|
30 |
-
return urlunsplit((scheme, netloc, path, query, fragment))
|
31 |
-
|
32 |
-
|
33 |
-
def _requests_proxies_arg(proxy) -> Optional[Dict[str, str]]:
|
34 |
-
"""Returns a value suitable for the 'proxies' argument to 'requests.request."""
|
35 |
-
if proxy is None:
|
36 |
-
return None
|
37 |
-
elif isinstance(proxy, str):
|
38 |
-
return {"http": proxy, "https": proxy}
|
39 |
-
elif isinstance(proxy, dict):
|
40 |
-
return proxy.copy()
|
41 |
-
else:
|
42 |
-
raise ValueError(
|
43 |
-
"'openai.proxy' must be specified as either a string URL or a dict with string URL under the https and/or http keys."
|
44 |
-
)
|
45 |
-
|
46 |
-
|
47 |
-
def _make_session() -> requests.Session:
|
48 |
-
if not openai.verify_ssl_certs:
|
49 |
-
warnings.warn("verify_ssl_certs is ignored; openai always verifies.")
|
50 |
-
s = requests.Session()
|
51 |
-
proxies = _requests_proxies_arg(openai.proxy)
|
52 |
-
if proxies:
|
53 |
-
s.proxies = proxies
|
54 |
-
s.mount(
|
55 |
-
"https://",
|
56 |
-
requests.adapters.HTTPAdapter(max_retries=MAX_CONNECTION_RETRIES),
|
57 |
-
)
|
58 |
-
return s
|
59 |
-
|
60 |
-
|
61 |
-
def parse_stream(rbody):
|
62 |
-
for line in rbody:
|
63 |
-
if line:
|
64 |
-
if line == b"data: [DONE]":
|
65 |
-
# return here will cause GeneratorExit exception in urllib3
|
66 |
-
# and it will close http connection with TCP Reset
|
67 |
-
continue
|
68 |
-
if hasattr(line, "decode"):
|
69 |
-
line = line.decode("utf-8")
|
70 |
-
if line.startswith("data: "):
|
71 |
-
line = line[len("data: ") :]
|
72 |
-
yield line
|
73 |
-
|
74 |
-
|
75 |
-
class APIRequestor:
|
76 |
-
def __init__(
|
77 |
-
self,
|
78 |
-
key=None,
|
79 |
-
api_base=None,
|
80 |
-
api_type=None,
|
81 |
-
api_version=None,
|
82 |
-
organization=None,
|
83 |
-
):
|
84 |
-
self.api_base = api_base or openai.api_base
|
85 |
-
self.api_key = key or util.default_api_key()
|
86 |
-
self.api_type = (
|
87 |
-
ApiType.from_str(api_type)
|
88 |
-
if api_type
|
89 |
-
else ApiType.from_str(openai.api_type)
|
90 |
-
)
|
91 |
-
self.api_version = api_version or openai.api_version
|
92 |
-
self.organization = organization or openai.organization
|
93 |
-
|
94 |
-
@classmethod
|
95 |
-
def format_app_info(cls, info):
|
96 |
-
str = info["name"]
|
97 |
-
if info["version"]:
|
98 |
-
str += "/%s" % (info["version"],)
|
99 |
-
if info["url"]:
|
100 |
-
str += " (%s)" % (info["url"],)
|
101 |
-
return str
|
102 |
-
|
103 |
-
def request(
|
104 |
-
self,
|
105 |
-
method,
|
106 |
-
url,
|
107 |
-
params=None,
|
108 |
-
headers=None,
|
109 |
-
files=None,
|
110 |
-
stream=False,
|
111 |
-
request_id: Optional[str] = None,
|
112 |
-
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool, str]:
|
113 |
-
result = self.request_raw(
|
114 |
-
method.lower(),
|
115 |
-
url,
|
116 |
-
params=params,
|
117 |
-
supplied_headers=headers,
|
118 |
-
files=files,
|
119 |
-
stream=stream,
|
120 |
-
request_id=request_id,
|
121 |
-
)
|
122 |
-
resp, got_stream = self._interpret_response(result, stream)
|
123 |
-
return resp, got_stream, self.api_key
|
124 |
-
|
125 |
-
def handle_error_response(self, rbody, rcode, resp, rheaders, stream_error=False):
|
126 |
-
try:
|
127 |
-
error_data = resp["error"]
|
128 |
-
except (KeyError, TypeError):
|
129 |
-
raise error.APIError(
|
130 |
-
"Invalid response object from API: %r (HTTP response code "
|
131 |
-
"was %d)" % (rbody, rcode),
|
132 |
-
rbody,
|
133 |
-
rcode,
|
134 |
-
resp,
|
135 |
-
)
|
136 |
-
|
137 |
-
if "internal_message" in error_data:
|
138 |
-
error_data["message"] += "\n\n" + error_data["internal_message"]
|
139 |
-
|
140 |
-
util.log_info(
|
141 |
-
"OpenAI API error received",
|
142 |
-
error_code=error_data.get("code"),
|
143 |
-
error_type=error_data.get("type"),
|
144 |
-
error_message=error_data.get("message"),
|
145 |
-
error_param=error_data.get("param"),
|
146 |
-
stream_error=stream_error,
|
147 |
-
)
|
148 |
-
|
149 |
-
# Rate limits were previously coded as 400's with code 'rate_limit'
|
150 |
-
if rcode == 429:
|
151 |
-
return error.RateLimitError(
|
152 |
-
error_data.get("message"), rbody, rcode, resp, rheaders
|
153 |
-
)
|
154 |
-
elif rcode in [400, 404, 415]:
|
155 |
-
return error.InvalidRequestError(
|
156 |
-
error_data.get("message"),
|
157 |
-
error_data.get("param"),
|
158 |
-
error_data.get("code"),
|
159 |
-
rbody,
|
160 |
-
rcode,
|
161 |
-
resp,
|
162 |
-
rheaders,
|
163 |
-
)
|
164 |
-
elif rcode == 401:
|
165 |
-
return error.AuthenticationError(
|
166 |
-
error_data.get("message"), rbody, rcode, resp, rheaders
|
167 |
-
)
|
168 |
-
elif rcode == 403:
|
169 |
-
return error.PermissionError(
|
170 |
-
error_data.get("message"), rbody, rcode, resp, rheaders
|
171 |
-
)
|
172 |
-
elif rcode == 409:
|
173 |
-
return error.TryAgain(
|
174 |
-
error_data.get("message"), rbody, rcode, resp, rheaders
|
175 |
-
)
|
176 |
-
elif stream_error:
|
177 |
-
# TODO: we will soon attach status codes to stream errors
|
178 |
-
parts = [error_data.get("message"), "(Error occurred while streaming.)"]
|
179 |
-
message = " ".join([p for p in parts if p is not None])
|
180 |
-
return error.APIError(message, rbody, rcode, resp, rheaders)
|
181 |
-
else:
|
182 |
-
return error.APIError(
|
183 |
-
error_data.get("message"), rbody, rcode, resp, rheaders
|
184 |
-
)
|
185 |
-
|
186 |
-
def request_headers(
|
187 |
-
self, method: str, extra, request_id: Optional[str]
|
188 |
-
) -> Dict[str, str]:
|
189 |
-
user_agent = "OpenAI/v1 PythonBindings/%s" % (version.VERSION,)
|
190 |
-
if openai.app_info:
|
191 |
-
user_agent += " " + self.format_app_info(openai.app_info)
|
192 |
-
|
193 |
-
uname_without_node = " ".join(
|
194 |
-
v for k, v in platform.uname()._asdict().items() if k != "node"
|
195 |
-
)
|
196 |
-
ua = {
|
197 |
-
"bindings_version": version.VERSION,
|
198 |
-
"httplib": "requests",
|
199 |
-
"lang": "python",
|
200 |
-
"lang_version": platform.python_version(),
|
201 |
-
"platform": platform.platform(),
|
202 |
-
"publisher": "openai",
|
203 |
-
"uname": uname_without_node,
|
204 |
-
}
|
205 |
-
if openai.app_info:
|
206 |
-
ua["application"] = openai.app_info
|
207 |
-
|
208 |
-
headers = {
|
209 |
-
"X-OpenAI-Client-User-Agent": json.dumps(ua),
|
210 |
-
"User-Agent": user_agent,
|
211 |
-
}
|
212 |
-
|
213 |
-
headers.update(util.api_key_to_header(self.api_type, self.api_key))
|
214 |
-
|
215 |
-
if self.organization:
|
216 |
-
headers["OpenAI-Organization"] = self.organization
|
217 |
-
|
218 |
-
if self.api_version is not None and self.api_type == ApiType.OPEN_AI:
|
219 |
-
headers["OpenAI-Version"] = self.api_version
|
220 |
-
if request_id is not None:
|
221 |
-
headers["X-Request-Id"] = request_id
|
222 |
-
if openai.debug:
|
223 |
-
headers["OpenAI-Debug"] = "true"
|
224 |
-
headers.update(extra)
|
225 |
-
|
226 |
-
return headers
|
227 |
-
|
228 |
-
def _validate_headers(
|
229 |
-
self, supplied_headers: Optional[Dict[str, str]]
|
230 |
-
) -> Dict[str, str]:
|
231 |
-
headers: Dict[str, str] = {}
|
232 |
-
if supplied_headers is None:
|
233 |
-
return headers
|
234 |
-
|
235 |
-
if not isinstance(supplied_headers, dict):
|
236 |
-
raise TypeError("Headers must be a dictionary")
|
237 |
-
|
238 |
-
for k, v in supplied_headers.items():
|
239 |
-
if not isinstance(k, str):
|
240 |
-
raise TypeError("Header keys must be strings")
|
241 |
-
if not isinstance(v, str):
|
242 |
-
raise TypeError("Header values must be strings")
|
243 |
-
headers[k] = v
|
244 |
-
|
245 |
-
# NOTE: It is possible to do more validation of the headers, but a request could always
|
246 |
-
# be made to the API manually with invalid headers, so we need to handle them server side.
|
247 |
-
|
248 |
-
return headers
|
249 |
-
|
250 |
-
def request_raw(
|
251 |
-
self,
|
252 |
-
method,
|
253 |
-
url,
|
254 |
-
*,
|
255 |
-
params=None,
|
256 |
-
supplied_headers: Dict[str, str] = None,
|
257 |
-
files=None,
|
258 |
-
stream: bool = False,
|
259 |
-
request_id: Optional[str] = None,
|
260 |
-
) -> requests.Response:
|
261 |
-
abs_url = "%s%s" % (self.api_base, url)
|
262 |
-
headers = self._validate_headers(supplied_headers)
|
263 |
-
|
264 |
-
data = None
|
265 |
-
if method == "get" or method == "delete":
|
266 |
-
if params:
|
267 |
-
encoded_params = urlencode(
|
268 |
-
[(k, v) for k, v in params.items() if v is not None]
|
269 |
-
)
|
270 |
-
abs_url = _build_api_url(abs_url, encoded_params)
|
271 |
-
elif method in {"post", "put"}:
|
272 |
-
if params and files:
|
273 |
-
raise ValueError("At most one of params and files may be specified.")
|
274 |
-
if params:
|
275 |
-
data = json.dumps(params).encode()
|
276 |
-
headers["Content-Type"] = "application/json"
|
277 |
-
else:
|
278 |
-
raise error.APIConnectionError(
|
279 |
-
"Unrecognized HTTP method %r. This may indicate a bug in the "
|
280 |
-
"OpenAI bindings. Please contact [email protected] for "
|
281 |
-
"assistance." % (method,)
|
282 |
-
)
|
283 |
-
|
284 |
-
headers = self.request_headers(method, headers, request_id)
|
285 |
-
|
286 |
-
util.log_info("Request to OpenAI API", method=method, path=abs_url)
|
287 |
-
util.log_debug("Post details", data=data, api_version=self.api_version)
|
288 |
-
|
289 |
-
if not hasattr(_thread_context, "session"):
|
290 |
-
_thread_context.session = _make_session()
|
291 |
-
try:
|
292 |
-
result = _thread_context.session.request(
|
293 |
-
method,
|
294 |
-
abs_url,
|
295 |
-
headers=headers,
|
296 |
-
data=data,
|
297 |
-
files=files,
|
298 |
-
stream=stream,
|
299 |
-
timeout=TIMEOUT_SECS,
|
300 |
-
)
|
301 |
-
except requests.exceptions.RequestException as e:
|
302 |
-
raise error.APIConnectionError("Error communicating with OpenAI") from e
|
303 |
-
util.log_info(
|
304 |
-
"OpenAI API response",
|
305 |
-
path=abs_url,
|
306 |
-
response_code=result.status_code,
|
307 |
-
processing_ms=result.headers.get("OpenAI-Processing-Ms"),
|
308 |
-
)
|
309 |
-
# Don't read the whole stream for debug logging unless necessary.
|
310 |
-
if openai.log == "debug":
|
311 |
-
util.log_debug(
|
312 |
-
"API response body", body=result.content, headers=result.headers
|
313 |
-
)
|
314 |
-
return result
|
315 |
-
|
316 |
-
def _interpret_response(
|
317 |
-
self, result: requests.Response, stream: bool
|
318 |
-
) -> Tuple[Union[OpenAIResponse, Iterator[OpenAIResponse]], bool]:
|
319 |
-
"""Returns the response(s) and a bool indicating whether it is a stream."""
|
320 |
-
if stream and "text/event-stream" in result.headers.get("Content-Type", ""):
|
321 |
-
return (
|
322 |
-
self._interpret_response_line(
|
323 |
-
line, result.status_code, result.headers, stream=True
|
324 |
-
)
|
325 |
-
for line in parse_stream(result.iter_lines())
|
326 |
-
), True
|
327 |
-
else:
|
328 |
-
return (
|
329 |
-
self._interpret_response_line(
|
330 |
-
result.content, result.status_code, result.headers, stream=False
|
331 |
-
),
|
332 |
-
False,
|
333 |
-
)
|
334 |
-
|
335 |
-
def _interpret_response_line(
|
336 |
-
self, rbody, rcode, rheaders, stream: bool
|
337 |
-
) -> OpenAIResponse:
|
338 |
-
# HTTP 204 response code does not have any content in the body.
|
339 |
-
if rcode == 204:
|
340 |
-
return OpenAIResponse(None, rheaders)
|
341 |
-
|
342 |
-
if rcode == 503:
|
343 |
-
raise error.ServiceUnavailableError(
|
344 |
-
"The server is overloaded or not ready yet.",
|
345 |
-
rbody,
|
346 |
-
rcode,
|
347 |
-
headers=rheaders,
|
348 |
-
)
|
349 |
-
try:
|
350 |
-
if hasattr(rbody, "decode"):
|
351 |
-
rbody = rbody.decode("utf-8")
|
352 |
-
data = json.loads(rbody)
|
353 |
-
except (JSONDecodeError, UnicodeDecodeError):
|
354 |
-
raise error.APIError(
|
355 |
-
f"HTTP code {rcode} from API ({rbody})", rbody, rcode, headers=rheaders
|
356 |
-
)
|
357 |
-
resp = OpenAIResponse(data, rheaders)
|
358 |
-
# In the future, we might add a "status" parameter to errors
|
359 |
-
# to better handle the "error while streaming" case.
|
360 |
-
stream_error = stream and "error" in resp.data
|
361 |
-
if stream_error or not 200 <= rcode < 300:
|
362 |
-
raise self.handle_error_response(
|
363 |
-
rbody, rcode, resp.data, rheaders, stream_error=stream_error
|
364 |
-
)
|
365 |
-
return resp
|
spaces/CK42/sentiment-model-comparison/app.py
DELETED
@@ -1,92 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-
-app = gr.Blocks()
-
-model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
-model_id_2 = "microsoft/deberta-xlarge-mnli"
-model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
-model_id_4 = "lordtt13/emo-mobilebert"
-model_id_5 = "juliensimon/reviews-sentiment-analysis"
-model_id_6 = "sbcBI/sentiment_analysis_model"
-
-def parse_output(output_json):
-    list_pred=[]
-    for i in range(len(output_json[0])):
-        label = output_json[0][i]['label']
-        score = output_json[0][i]['score']
-        list_pred.append((label, score))
-    return list_pred
-
-def get_prediction(model_id):
-
-    classifier = pipeline("text-classification", model=model_id, return_all_scores=True)
-
-    def predict(review):
-        prediction = classifier(review)
-        print(prediction)
-        return parse_output(prediction)
-    return predict
-
-with app:
-    gr.Markdown(
-        """
-        # Compare Sentiment Analysis Models
-
-        Type text to predict sentiment.
-        """)
-    with gr.Row():
-        inp_1= gr.Textbox(label="Type text here.",placeholder="The customer service was satisfactory.")
-    gr.Markdown(
-        """
-        **Model Predictions**
-        """)
-    with gr.Row():
-        with gr.Column():
-            gr.Markdown(
-                """
-                Model 1 = nlptown/bert-base-multilingual-uncased-sentiment
-                """)
-            btn1 = gr.Button("Predict - Model 1")
-            gr.Markdown(
-                """
-                Model 2 = microsoft/deberta-xlarge-mnli
-                """)
-            btn2 = gr.Button("Predict - Model 2")
-            gr.Markdown(
-                """
-                Model 3 = distilbert-base-uncased-finetuned-sst-2-english"
-                """)
-            btn3 = gr.Button("Predict - Model 3")
-            gr.Markdown(
-                """
-                Model 4 = lordtt13/emo-mobilebert
-                """)
-            btn4 = gr.Button("Predict - Model 4")
-            gr.Markdown(
-                """
-                Model 5 = juliensimon/reviews-sentiment-analysis
-                """)
-            btn5 = gr.Button("Predict - Model 5")
-            gr.Markdown(
-                """
-                Model 6 = sbcBI/sentiment_analysis_model
-                """)
-            btn6 = gr.Button("Predict - Model 6")
-
-        with gr.Column():
-            out_1 = gr.Textbox(label="Predictions for Model 1")
-            out_2 = gr.Textbox(label="Predictions for Model 2")
-            out_3 = gr.Textbox(label="Predictions for Model 3")
-            out_4 = gr.Textbox(label="Predictions for Model 4")
-            out_5 = gr.Textbox(label="Predictions for Model 5")
-            out_6 = gr.Textbox(label="Predictions for Model 6")
-
-    btn1.click(fn=get_prediction(model_id_1), inputs=inp_1, outputs=out_1)
-    btn2.click(fn=get_prediction(model_id_2), inputs=inp_1, outputs=out_2)
-    btn3.click(fn=get_prediction(model_id_3), inputs=inp_1, outputs=out_3)
-    btn4.click(fn=get_prediction(model_id_4), inputs=inp_1, outputs=out_4)
-    btn5.click(fn=get_prediction(model_id_5), inputs=inp_1, outputs=out_5)
-    btn6.click(fn=get_prediction(model_id_6), inputs=inp_1, outputs=out_6)
-
-app.launch()
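For reference on `parse_output` above: a condensed, self-contained sketch of what it does with a `return_all_scores=True` pipeline result. The labels and scores below are made-up illustrative values, not real model output.

# One input text -> a nested list with one score dict per label.
sample = [[{"label": "NEGATIVE", "score": 0.03}, {"label": "POSITIVE", "score": 0.97}]]

def parse_output(output_json):
    # Flatten the per-label dicts into (label, score) tuples, as in the deleted app.py.
    list_pred = []
    for i in range(len(output_json[0])):
        list_pred.append((output_json[0][i]["label"], output_json[0][i]["score"]))
    return list_pred

print(parse_output(sample))  # [('NEGATIVE', 0.03), ('POSITIVE', 0.97)]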
spaces/CVPR/WALT/mmcv_custom/__init__.py
DELETED
@@ -1,5 +0,0 @@
-# -*- coding: utf-8 -*-
-
-from .checkpoint import load_checkpoint
-
-__all__ = ['load_checkpoint']
spaces/CVPR/WALT/mmdet/models/dense_heads/yolact_head.py
DELETED
@@ -1,943 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
import torch.nn as nn
|
4 |
-
import torch.nn.functional as F
|
5 |
-
from mmcv.cnn import ConvModule, xavier_init
|
6 |
-
from mmcv.runner import force_fp32
|
7 |
-
|
8 |
-
from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply
|
9 |
-
from ..builder import HEADS, build_loss
|
10 |
-
from .anchor_head import AnchorHead
|
11 |
-
|
12 |
-
|
13 |
-
@HEADS.register_module()
|
14 |
-
class YOLACTHead(AnchorHead):
|
15 |
-
"""YOLACT box head used in https://arxiv.org/abs/1904.02689.
|
16 |
-
|
17 |
-
Note that YOLACT head is a light version of RetinaNet head.
|
18 |
-
Four differences are described as follows:
|
19 |
-
|
20 |
-
1. YOLACT box head has three-times fewer anchors.
|
21 |
-
2. YOLACT box head shares the convs for box and cls branches.
|
22 |
-
3. YOLACT box head uses OHEM instead of Focal loss.
|
23 |
-
4. YOLACT box head predicts a set of mask coefficients for each box.
|
24 |
-
|
25 |
-
Args:
|
26 |
-
num_classes (int): Number of categories excluding the background
|
27 |
-
category.
|
28 |
-
in_channels (int): Number of channels in the input feature map.
|
29 |
-
anchor_generator (dict): Config dict for anchor generator
|
30 |
-
loss_cls (dict): Config of classification loss.
|
31 |
-
loss_bbox (dict): Config of localization loss.
|
32 |
-
num_head_convs (int): Number of the conv layers shared by
|
33 |
-
box and cls branches.
|
34 |
-
num_protos (int): Number of the mask coefficients.
|
35 |
-
use_ohem (bool): If true, ``loss_single_OHEM`` will be used for
|
36 |
-
cls loss calculation. If false, ``loss_single`` will be used.
|
37 |
-
conv_cfg (dict): Dictionary to construct and config conv layer.
|
38 |
-
norm_cfg (dict): Dictionary to construct and config norm layer.
|
39 |
-
"""
|
40 |
-
|
41 |
-
def __init__(self,
|
42 |
-
num_classes,
|
43 |
-
in_channels,
|
44 |
-
anchor_generator=dict(
|
45 |
-
type='AnchorGenerator',
|
46 |
-
octave_base_scale=3,
|
47 |
-
scales_per_octave=1,
|
48 |
-
ratios=[0.5, 1.0, 2.0],
|
49 |
-
strides=[8, 16, 32, 64, 128]),
|
50 |
-
loss_cls=dict(
|
51 |
-
type='CrossEntropyLoss',
|
52 |
-
use_sigmoid=False,
|
53 |
-
reduction='none',
|
54 |
-
loss_weight=1.0),
|
55 |
-
loss_bbox=dict(
|
56 |
-
type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
|
57 |
-
num_head_convs=1,
|
58 |
-
num_protos=32,
|
59 |
-
use_ohem=True,
|
60 |
-
conv_cfg=None,
|
61 |
-
norm_cfg=None,
|
62 |
-
**kwargs):
|
63 |
-
self.num_head_convs = num_head_convs
|
64 |
-
self.num_protos = num_protos
|
65 |
-
self.use_ohem = use_ohem
|
66 |
-
self.conv_cfg = conv_cfg
|
67 |
-
self.norm_cfg = norm_cfg
|
68 |
-
super(YOLACTHead, self).__init__(
|
69 |
-
num_classes,
|
70 |
-
in_channels,
|
71 |
-
loss_cls=loss_cls,
|
72 |
-
loss_bbox=loss_bbox,
|
73 |
-
anchor_generator=anchor_generator,
|
74 |
-
**kwargs)
|
75 |
-
if self.use_ohem:
|
76 |
-
sampler_cfg = dict(type='PseudoSampler')
|
77 |
-
self.sampler = build_sampler(sampler_cfg, context=self)
|
78 |
-
self.sampling = False
|
79 |
-
|
80 |
-
def _init_layers(self):
|
81 |
-
"""Initialize layers of the head."""
|
82 |
-
self.relu = nn.ReLU(inplace=True)
|
83 |
-
self.head_convs = nn.ModuleList()
|
84 |
-
for i in range(self.num_head_convs):
|
85 |
-
chn = self.in_channels if i == 0 else self.feat_channels
|
86 |
-
self.head_convs.append(
|
87 |
-
ConvModule(
|
88 |
-
chn,
|
89 |
-
self.feat_channels,
|
90 |
-
3,
|
91 |
-
stride=1,
|
92 |
-
padding=1,
|
93 |
-
conv_cfg=self.conv_cfg,
|
94 |
-
norm_cfg=self.norm_cfg))
|
95 |
-
self.conv_cls = nn.Conv2d(
|
96 |
-
self.feat_channels,
|
97 |
-
self.num_anchors * self.cls_out_channels,
|
98 |
-
3,
|
99 |
-
padding=1)
|
100 |
-
self.conv_reg = nn.Conv2d(
|
101 |
-
self.feat_channels, self.num_anchors * 4, 3, padding=1)
|
102 |
-
self.conv_coeff = nn.Conv2d(
|
103 |
-
self.feat_channels,
|
104 |
-
self.num_anchors * self.num_protos,
|
105 |
-
3,
|
106 |
-
padding=1)
|
107 |
-
|
108 |
-
def init_weights(self):
|
109 |
-
"""Initialize weights of the head."""
|
110 |
-
for m in self.head_convs:
|
111 |
-
xavier_init(m.conv, distribution='uniform', bias=0)
|
112 |
-
xavier_init(self.conv_cls, distribution='uniform', bias=0)
|
113 |
-
xavier_init(self.conv_reg, distribution='uniform', bias=0)
|
114 |
-
xavier_init(self.conv_coeff, distribution='uniform', bias=0)
|
115 |
-
|
116 |
-
def forward_single(self, x):
|
117 |
-
"""Forward feature of a single scale level.
|
118 |
-
|
119 |
-
Args:
|
120 |
-
x (Tensor): Features of a single scale level.
|
121 |
-
|
122 |
-
Returns:
|
123 |
-
tuple:
|
124 |
-
cls_score (Tensor): Cls scores for a single scale level \
|
125 |
-
the channels number is num_anchors * num_classes.
|
126 |
-
bbox_pred (Tensor): Box energies / deltas for a single scale \
|
127 |
-
level, the channels number is num_anchors * 4.
|
128 |
-
coeff_pred (Tensor): Mask coefficients for a single scale \
|
129 |
-
level, the channels number is num_anchors * num_protos.
|
130 |
-
"""
|
131 |
-
for head_conv in self.head_convs:
|
132 |
-
x = head_conv(x)
|
133 |
-
cls_score = self.conv_cls(x)
|
134 |
-
bbox_pred = self.conv_reg(x)
|
135 |
-
coeff_pred = self.conv_coeff(x).tanh()
|
136 |
-
return cls_score, bbox_pred, coeff_pred
|
137 |
-
|
138 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
|
139 |
-
def loss(self,
|
140 |
-
cls_scores,
|
141 |
-
bbox_preds,
|
142 |
-
gt_bboxes,
|
143 |
-
gt_labels,
|
144 |
-
img_metas,
|
145 |
-
gt_bboxes_ignore=None):
|
146 |
-
"""A combination of the func:``AnchorHead.loss`` and
|
147 |
-
func:``SSDHead.loss``.
|
148 |
-
|
149 |
-
When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,
|
150 |
-
otherwise, it follows ``AnchorHead.loss``. Besides, it additionally
|
151 |
-
returns ``sampling_results``.
|
152 |
-
|
153 |
-
Args:
|
154 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
155 |
-
Has shape (N, num_anchors * num_classes, H, W)
|
156 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
157 |
-
level with shape (N, num_anchors * 4, H, W)
|
158 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
|
159 |
-
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
|
160 |
-
gt_labels (list[Tensor]): Class indices corresponding to each box
|
161 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
162 |
-
image size, scaling factor, etc.
|
163 |
-
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
|
164 |
-
boxes can be ignored when computing the loss. Default: None
|
165 |
-
|
166 |
-
Returns:
|
167 |
-
tuple:
|
168 |
-
dict[str, Tensor]: A dictionary of loss components.
|
169 |
-
List[:obj:``SamplingResult``]: Sampler results for each image.
|
170 |
-
"""
|
171 |
-
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
|
172 |
-
assert len(featmap_sizes) == self.anchor_generator.num_levels
|
173 |
-
|
174 |
-
device = cls_scores[0].device
|
175 |
-
|
176 |
-
anchor_list, valid_flag_list = self.get_anchors(
|
177 |
-
featmap_sizes, img_metas, device=device)
|
178 |
-
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
|
179 |
-
cls_reg_targets = self.get_targets(
|
180 |
-
anchor_list,
|
181 |
-
valid_flag_list,
|
182 |
-
gt_bboxes,
|
183 |
-
img_metas,
|
184 |
-
gt_bboxes_ignore_list=gt_bboxes_ignore,
|
185 |
-
gt_labels_list=gt_labels,
|
186 |
-
label_channels=label_channels,
|
187 |
-
unmap_outputs=not self.use_ohem,
|
188 |
-
return_sampling_results=True)
|
189 |
-
if cls_reg_targets is None:
|
190 |
-
return None
|
191 |
-
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
|
192 |
-
num_total_pos, num_total_neg, sampling_results) = cls_reg_targets
|
193 |
-
|
194 |
-
if self.use_ohem:
|
195 |
-
num_images = len(img_metas)
|
196 |
-
all_cls_scores = torch.cat([
|
197 |
-
s.permute(0, 2, 3, 1).reshape(
|
198 |
-
num_images, -1, self.cls_out_channels) for s in cls_scores
|
199 |
-
], 1)
|
200 |
-
all_labels = torch.cat(labels_list, -1).view(num_images, -1)
|
201 |
-
all_label_weights = torch.cat(label_weights_list,
|
202 |
-
-1).view(num_images, -1)
|
203 |
-
all_bbox_preds = torch.cat([
|
204 |
-
b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
|
205 |
-
for b in bbox_preds
|
206 |
-
], -2)
|
207 |
-
all_bbox_targets = torch.cat(bbox_targets_list,
|
208 |
-
-2).view(num_images, -1, 4)
|
209 |
-
all_bbox_weights = torch.cat(bbox_weights_list,
|
210 |
-
-2).view(num_images, -1, 4)
|
211 |
-
|
212 |
-
# concat all level anchors to a single tensor
|
213 |
-
all_anchors = []
|
214 |
-
for i in range(num_images):
|
215 |
-
all_anchors.append(torch.cat(anchor_list[i]))
|
216 |
-
|
217 |
-
# check NaN and Inf
|
218 |
-
assert torch.isfinite(all_cls_scores).all().item(), \
|
219 |
-
'classification scores become infinite or NaN!'
|
220 |
-
assert torch.isfinite(all_bbox_preds).all().item(), \
|
221 |
-
'bbox predictions become infinite or NaN!'
|
222 |
-
|
223 |
-
losses_cls, losses_bbox = multi_apply(
|
224 |
-
self.loss_single_OHEM,
|
225 |
-
all_cls_scores,
|
226 |
-
all_bbox_preds,
|
227 |
-
all_anchors,
|
228 |
-
all_labels,
|
229 |
-
all_label_weights,
|
230 |
-
all_bbox_targets,
|
231 |
-
all_bbox_weights,
|
232 |
-
num_total_samples=num_total_pos)
|
233 |
-
else:
|
234 |
-
num_total_samples = (
|
235 |
-
num_total_pos +
|
236 |
-
num_total_neg if self.sampling else num_total_pos)
|
237 |
-
|
238 |
-
# anchor number of multi levels
|
239 |
-
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
|
240 |
-
# concat all level anchors and flags to a single tensor
|
241 |
-
concat_anchor_list = []
|
242 |
-
for i in range(len(anchor_list)):
|
243 |
-
concat_anchor_list.append(torch.cat(anchor_list[i]))
|
244 |
-
all_anchor_list = images_to_levels(concat_anchor_list,
|
245 |
-
num_level_anchors)
|
246 |
-
losses_cls, losses_bbox = multi_apply(
|
247 |
-
self.loss_single,
|
248 |
-
cls_scores,
|
249 |
-
bbox_preds,
|
250 |
-
all_anchor_list,
|
251 |
-
labels_list,
|
252 |
-
label_weights_list,
|
253 |
-
bbox_targets_list,
|
254 |
-
bbox_weights_list,
|
255 |
-
num_total_samples=num_total_samples)
|
256 |
-
|
257 |
-
return dict(
|
258 |
-
loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results
|
259 |
-
|
260 |
-
def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
|
261 |
-
label_weights, bbox_targets, bbox_weights,
|
262 |
-
num_total_samples):
|
263 |
-
""""See func:``SSDHead.loss``."""
|
264 |
-
loss_cls_all = self.loss_cls(cls_score, labels, label_weights)
|
265 |
-
|
266 |
-
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
|
267 |
-
pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(
|
268 |
-
as_tuple=False).reshape(-1)
|
269 |
-
neg_inds = (labels == self.num_classes).nonzero(
|
270 |
-
as_tuple=False).view(-1)
|
271 |
-
|
272 |
-
num_pos_samples = pos_inds.size(0)
|
273 |
-
if num_pos_samples == 0:
|
274 |
-
num_neg_samples = neg_inds.size(0)
|
275 |
-
else:
|
276 |
-
num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
|
277 |
-
if num_neg_samples > neg_inds.size(0):
|
278 |
-
num_neg_samples = neg_inds.size(0)
|
279 |
-
topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
|
280 |
-
loss_cls_pos = loss_cls_all[pos_inds].sum()
|
281 |
-
loss_cls_neg = topk_loss_cls_neg.sum()
|
282 |
-
loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
|
283 |
-
if self.reg_decoded_bbox:
|
284 |
-
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
|
285 |
-
# is applied directly on the decoded bounding boxes, it
|
286 |
-
# decodes the already encoded coordinates to absolute format.
|
287 |
-
bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
|
288 |
-
loss_bbox = self.loss_bbox(
|
289 |
-
bbox_pred,
|
290 |
-
bbox_targets,
|
291 |
-
bbox_weights,
|
292 |
-
avg_factor=num_total_samples)
|
293 |
-
return loss_cls[None], loss_bbox
|
294 |
-
|
295 |
-
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))
|
296 |
-
def get_bboxes(self,
|
297 |
-
cls_scores,
|
298 |
-
bbox_preds,
|
299 |
-
coeff_preds,
|
300 |
-
img_metas,
|
301 |
-
cfg=None,
|
302 |
-
rescale=False):
|
303 |
-
""""Similiar to func:``AnchorHead.get_bboxes``, but additionally
|
304 |
-
processes coeff_preds.
|
305 |
-
|
306 |
-
Args:
|
307 |
-
cls_scores (list[Tensor]): Box scores for each scale level
|
308 |
-
with shape (N, num_anchors * num_classes, H, W)
|
309 |
-
bbox_preds (list[Tensor]): Box energies / deltas for each scale
|
310 |
-
level with shape (N, num_anchors * 4, H, W)
|
311 |
-
coeff_preds (list[Tensor]): Mask coefficients for each scale
|
312 |
-
level with shape (N, num_anchors * num_protos, H, W)
|
313 |
-
img_metas (list[dict]): Meta information of each image, e.g.,
|
314 |
-
image size, scaling factor, etc.
|
315 |
-
cfg (mmcv.Config | None): Test / postprocessing configuration,
|
316 |
-
if None, test_cfg would be used
|
317 |
-
rescale (bool): If True, return boxes in original image space.
|
318 |
-
Default: False.
|
319 |
-
|
320 |
-
Returns:
|
321 |
-
list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is
|
322 |
-
a 3-tuple. The first item is an (n, 5) tensor, where the
|
323 |
-
first 4 columns are bounding box positions
|
324 |
-
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score
|
325 |
-
between 0 and 1. The second item is an (n,) tensor where each
|
326 |
-
item is the predicted class label of the corresponding box.
|
327 |
-
The third item is an (n, num_protos) tensor where each item
|
328 |
-
is the predicted mask coefficients of instance inside the
|
329 |
-
corresponding box.
|
330 |
-
"""
|
331 |
-
assert len(cls_scores) == len(bbox_preds)
|
332 |
-
num_levels = len(cls_scores)
|
333 |
-
|
334 |
-
device = cls_scores[0].device
|
335 |
-
featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
|
336 |
-
mlvl_anchors = self.anchor_generator.grid_anchors(
|
337 |
-
featmap_sizes, device=device)
|
338 |
-
|
339 |
-
det_bboxes = []
|
340 |
-
det_labels = []
|
341 |
-
det_coeffs = []
|
342 |
-
for img_id in range(len(img_metas)):
|
343 |
-
cls_score_list = [
|
344 |
-
cls_scores[i][img_id].detach() for i in range(num_levels)
|
345 |
-
]
|
346 |
-
bbox_pred_list = [
|
347 |
-
bbox_preds[i][img_id].detach() for i in range(num_levels)
|
348 |
-
]
|
349 |
-
coeff_pred_list = [
|
350 |
-
coeff_preds[i][img_id].detach() for i in range(num_levels)
|
351 |
-
]
|
352 |
-
img_shape = img_metas[img_id]['img_shape']
|
353 |
-
scale_factor = img_metas[img_id]['scale_factor']
|
354 |
-
bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list,
|
355 |
-
coeff_pred_list, mlvl_anchors,
|
356 |
-
img_shape, scale_factor, cfg,
|
357 |
-
rescale)
|
358 |
-
det_bboxes.append(bbox_res[0])
|
359 |
-
det_labels.append(bbox_res[1])
|
360 |
-
det_coeffs.append(bbox_res[2])
|
361 |
-
return det_bboxes, det_labels, det_coeffs
|
362 |
-
|
363 |
-
def _get_bboxes_single(self,
|
364 |
-
cls_score_list,
|
365 |
-
bbox_pred_list,
|
366 |
-
coeff_preds_list,
|
367 |
-
mlvl_anchors,
|
368 |
-
img_shape,
|
369 |
-
scale_factor,
|
370 |
-
cfg,
|
371 |
-
rescale=False):
|
372 |
-
""""Similiar to func:``AnchorHead._get_bboxes_single``, but
|
373 |
-
additionally processes coeff_preds_list and uses fast NMS instead of
|
374 |
-
traditional NMS.
|
375 |
-
|
376 |
-
Args:
|
377 |
-
cls_score_list (list[Tensor]): Box scores for a single scale level
|
378 |
-
Has shape (num_anchors * num_classes, H, W).
|
379 |
-
bbox_pred_list (list[Tensor]): Box energies / deltas for a single
|
380 |
-
scale level with shape (num_anchors * 4, H, W).
|
381 |
-
coeff_preds_list (list[Tensor]): Mask coefficients for a single
|
382 |
-
scale level with shape (num_anchors * num_protos, H, W).
|
383 |
-
mlvl_anchors (list[Tensor]): Box reference for a single scale level
|
384 |
-
with shape (num_total_anchors, 4).
|
385 |
-
img_shape (tuple[int]): Shape of the input image,
|
386 |
-
(height, width, 3).
|
387 |
-
scale_factor (ndarray): Scale factor of the image, arranged as
|
388 |
-
(w_scale, h_scale, w_scale, h_scale).
|
389 |
-
cfg (mmcv.Config): Test / postprocessing configuration,
|
390 |
-
if None, test_cfg would be used.
|
391 |
-
rescale (bool): If True, return boxes in original image space.
|
392 |
-
|
393 |
-
Returns:
|
394 |
-
tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor,
|
395 |
-
where the first 4 columns are bounding box positions
|
396 |
-
(tl_x, tl_y, br_x, br_y) and the 5-th column is a score between
|
397 |
-
0 and 1. The second item is an (n,) tensor where each item is
|
398 |
-
the predicted class label of the corresponding box. The third
|
399 |
-
item is an (n, num_protos) tensor where each item is the
|
400 |
-
predicted mask coefficients of instance inside the
|
401 |
-
corresponding box.
|
402 |
-
"""
|
403 |
-
cfg = self.test_cfg if cfg is None else cfg
|
404 |
-
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
|
405 |
-
mlvl_bboxes = []
|
406 |
-
mlvl_scores = []
|
407 |
-
mlvl_coeffs = []
|
408 |
-
for cls_score, bbox_pred, coeff_pred, anchors in \
|
409 |
-
zip(cls_score_list, bbox_pred_list,
|
410 |
-
coeff_preds_list, mlvl_anchors):
|
411 |
-
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
|
412 |
-
cls_score = cls_score.permute(1, 2,
|
413 |
-
0).reshape(-1, self.cls_out_channels)
|
414 |
-
if self.use_sigmoid_cls:
|
415 |
-
scores = cls_score.sigmoid()
|
416 |
-
else:
|
417 |
-
scores = cls_score.softmax(-1)
|
418 |
-
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
|
419 |
-
coeff_pred = coeff_pred.permute(1, 2,
|
420 |
-
0).reshape(-1, self.num_protos)
|
421 |
-
nms_pre = cfg.get('nms_pre', -1)
|
422 |
-
if nms_pre > 0 and scores.shape[0] > nms_pre:
|
423 |
-
# Get maximum scores for foreground classes.
|
424 |
-
if self.use_sigmoid_cls:
|
425 |
-
max_scores, _ = scores.max(dim=1)
|
426 |
-
else:
|
427 |
-
# remind that we set FG labels to [0, num_class-1]
|
428 |
-
# since mmdet v2.0
|
429 |
-
# BG cat_id: num_class
|
430 |
-
max_scores, _ = scores[:, :-1].max(dim=1)
|
431 |
-
_, topk_inds = max_scores.topk(nms_pre)
|
432 |
-
anchors = anchors[topk_inds, :]
|
433 |
-
bbox_pred = bbox_pred[topk_inds, :]
|
434 |
-
scores = scores[topk_inds, :]
|
435 |
-
coeff_pred = coeff_pred[topk_inds, :]
|
436 |
-
bboxes = self.bbox_coder.decode(
|
437 |
-
anchors, bbox_pred, max_shape=img_shape)
|
438 |
-
mlvl_bboxes.append(bboxes)
|
439 |
-
mlvl_scores.append(scores)
|
440 |
-
mlvl_coeffs.append(coeff_pred)
|
441 |
-
mlvl_bboxes = torch.cat(mlvl_bboxes)
|
442 |
-
if rescale:
|
443 |
-
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
|
444 |
-
mlvl_scores = torch.cat(mlvl_scores)
|
445 |
-
mlvl_coeffs = torch.cat(mlvl_coeffs)
|
446 |
-
if self.use_sigmoid_cls:
|
447 |
-
# Add a dummy background class to the backend when using sigmoid
|
448 |
-
# remind that we set FG labels to [0, num_class-1] since mmdet v2.0
|
449 |
-
# BG cat_id: num_class
|
450 |
-
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
|
451 |
-
mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
|
452 |
-
det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores,
|
453 |
-
mlvl_coeffs,
|
454 |
-
cfg.score_thr,
|
455 |
-
cfg.iou_thr, cfg.top_k,
|
456 |
-
cfg.max_per_img)
|
457 |
-
return det_bboxes, det_labels, det_coeffs
|
458 |
-
|
459 |
-
|
460 |
-
@HEADS.register_module()
|
461 |
-
class YOLACTSegmHead(nn.Module):
|
462 |
-
"""YOLACT segmentation head used in https://arxiv.org/abs/1904.02689.
|
463 |
-
|
464 |
-
Apply a semantic segmentation loss on feature space using layers that are
|
465 |
-
only evaluated during training to increase performance with no speed
|
466 |
-
penalty.
|
467 |
-
|
468 |
-
Args:
|
469 |
-
in_channels (int): Number of channels in the input feature map.
|
470 |
-
num_classes (int): Number of categories excluding the background
|
471 |
-
category.
|
472 |
-
loss_segm (dict): Config of semantic segmentation loss.
|
473 |
-
"""
|
474 |
-
|
475 |
-
def __init__(self,
|
476 |
-
num_classes,
|
477 |
-
in_channels=256,
|
478 |
-
loss_segm=dict(
|
479 |
-
type='CrossEntropyLoss',
|
480 |
-
use_sigmoid=True,
|
481 |
-
loss_weight=1.0)):
|
482 |
-
super(YOLACTSegmHead, self).__init__()
|
483 |
-
self.in_channels = in_channels
|
484 |
-
self.num_classes = num_classes
|
485 |
-
self.loss_segm = build_loss(loss_segm)
|
486 |
-
self._init_layers()
|
487 |
-
self.fp16_enabled = False
|
488 |
-
|
489 |
-
def _init_layers(self):
|
490 |
-
"""Initialize layers of the head."""
|
491 |
-
self.segm_conv = nn.Conv2d(
|
492 |
-
self.in_channels, self.num_classes, kernel_size=1)
|
493 |
-
|
494 |
-
def init_weights(self):
|
495 |
-
"""Initialize weights of the head."""
|
496 |
-
xavier_init(self.segm_conv, distribution='uniform')
|
497 |
-
|
498 |
-
def forward(self, x):
|
499 |
-
"""Forward feature from the upstream network.
|
500 |
-
|
501 |
-
Args:
|
502 |
-
x (Tensor): Feature from the upstream network, which is
|
503 |
-
a 4D-tensor.
|
504 |
-
|
505 |
-
Returns:
|
506 |
-
Tensor: Predicted semantic segmentation map with shape
|
507 |
-
(N, num_classes, H, W).
|
508 |
-
"""
|
509 |
-
return self.segm_conv(x)
|
510 |
-
|
511 |
-
@force_fp32(apply_to=('segm_pred', ))
|
512 |
-
def loss(self, segm_pred, gt_masks, gt_labels):
|
513 |
-
"""Compute loss of the head.
|
514 |
-
|
515 |
-
Args:
|
516 |
-
segm_pred (list[Tensor]): Predicted semantic segmentation map
|
517 |
-
with shape (N, num_classes, H, W).
|
518 |
-
gt_masks (list[Tensor]): Ground truth masks for each image with
|
519 |
-
the same shape as the input image.
|
520 |
-
gt_labels (list[Tensor]): Class indices corresponding to each box.
|
521 |
-
|
522 |
-
Returns:
|
523 |
-
dict[str, Tensor]: A dictionary of loss components.
|
524 |
-
"""
|
525 |
-
loss_segm = []
|
526 |
-
num_imgs, num_classes, mask_h, mask_w = segm_pred.size()
|
527 |
-
for idx in range(num_imgs):
|
528 |
-
cur_segm_pred = segm_pred[idx]
|
529 |
-
cur_gt_masks = gt_masks[idx].float()
|
530 |
-
cur_gt_labels = gt_labels[idx]
|
531 |
-
segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks,
|
532 |
-
cur_gt_labels)
|
533 |
-
if segm_targets is None:
|
534 |
-
loss = self.loss_segm(cur_segm_pred,
|
535 |
-
torch.zeros_like(cur_segm_pred),
|
536 |
-
torch.zeros_like(cur_segm_pred))
|
537 |
-
else:
|
538 |
-
loss = self.loss_segm(
|
539 |
-
cur_segm_pred,
|
540 |
-
segm_targets,
|
541 |
-
avg_factor=num_imgs * mask_h * mask_w)
|
542 |
-
loss_segm.append(loss)
|
543 |
-
return dict(loss_segm=loss_segm)
|
544 |
-
|
545 |
-
def get_targets(self, segm_pred, gt_masks, gt_labels):
|
546 |
-
"""Compute semantic segmentation targets for each image.
|
547 |
-
|
548 |
-
Args:
|
549 |
-
segm_pred (Tensor): Predicted semantic segmentation map
|
550 |
-
with shape (num_classes, H, W).
|
551 |
-
gt_masks (Tensor): Ground truth masks for each image with
|
552 |
-
the same shape as the input image.
|
553 |
-
gt_labels (Tensor): Class indices corresponding to each box.
|
554 |
-
|
555 |
-
Returns:
|
556 |
-
Tensor: Semantic segmentation targets with shape
|
557 |
-
(num_classes, H, W).
|
558 |
-
"""
|
559 |
-
if gt_masks.size(0) == 0:
|
560 |
-
return None
|
561 |
-
num_classes, mask_h, mask_w = segm_pred.size()
|
562 |
-
with torch.no_grad():
|
563 |
-
downsampled_masks = F.interpolate(
|
564 |
-
gt_masks.unsqueeze(0), (mask_h, mask_w),
|
565 |
-
mode='bilinear',
|
566 |
-
align_corners=False).squeeze(0)
|
567 |
-
downsampled_masks = downsampled_masks.gt(0.5).float()
|
568 |
-
segm_targets = torch.zeros_like(segm_pred, requires_grad=False)
|
569 |
-
for obj_idx in range(downsampled_masks.size(0)):
|
570 |
-
segm_targets[gt_labels[obj_idx] - 1] = torch.max(
|
571 |
-
segm_targets[gt_labels[obj_idx] - 1],
|
572 |
-
downsampled_masks[obj_idx])
|
573 |
-
return segm_targets
|
574 |
-
|
575 |
-
|
576 |
-
@HEADS.register_module()
|
577 |
-
class YOLACTProtonet(nn.Module):
|
578 |
-
"""YOLACT mask head used in https://arxiv.org/abs/1904.02689.
|
579 |
-
|
580 |
-
This head outputs the mask prototypes for YOLACT.
|
581 |
-
|
582 |
-
Args:
|
583 |
-
in_channels (int): Number of channels in the input feature map.
|
584 |
-
proto_channels (tuple[int]): Output channels of protonet convs.
|
585 |
-
proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.
|
586 |
-
include_last_relu (bool): Whether to keep the last relu of protonet.
|
587 |
-
num_protos (int): Number of prototypes.
|
588 |
-
num_classes (int): Number of categories excluding the background
|
589 |
-
category.
|
590 |
-
loss_mask_weight (float): Reweight the mask loss by this factor.
|
591 |
-
max_masks_to_train (int): Maximum number of masks to train for
|
592 |
-
each image.
|
593 |
-
"""
|
594 |
-
|
595 |
-
def __init__(self,
|
596 |
-
num_classes,
|
597 |
-
in_channels=256,
|
598 |
-
proto_channels=(256, 256, 256, None, 256, 32),
|
599 |
-
proto_kernel_sizes=(3, 3, 3, -2, 3, 1),
|
600 |
-
include_last_relu=True,
|
601 |
-
num_protos=32,
|
602 |
-
loss_mask_weight=1.0,
|
603 |
-
max_masks_to_train=100):
|
604 |
-
super(YOLACTProtonet, self).__init__()
|
605 |
-
self.in_channels = in_channels
|
606 |
-
self.proto_channels = proto_channels
|
607 |
-
self.proto_kernel_sizes = proto_kernel_sizes
|
608 |
-
self.include_last_relu = include_last_relu
|
609 |
-
self.protonet = self._init_layers()
|
610 |
-
|
611 |
-
self.loss_mask_weight = loss_mask_weight
|
612 |
-
self.num_protos = num_protos
|
613 |
-
self.num_classes = num_classes
|
614 |
-
self.max_masks_to_train = max_masks_to_train
|
615 |
-
self.fp16_enabled = False
|
616 |
-
|
617 |
-
def _init_layers(self):
|
618 |
-
"""A helper function to take a config setting and turn it into a
|
619 |
-
network."""
|
620 |
-
# Possible patterns:
|
621 |
-
# ( 256, 3) -> conv
|
622 |
-
# ( 256,-2) -> deconv
|
623 |
-
# (None,-2) -> bilinear interpolate
|
624 |
-
in_channels = self.in_channels
|
625 |
-
protonets = nn.ModuleList()
|
626 |
-
for num_channels, kernel_size in zip(self.proto_channels,
|
627 |
-
self.proto_kernel_sizes):
|
628 |
-
if kernel_size > 0:
|
629 |
-
layer = nn.Conv2d(
|
630 |
-
in_channels,
|
631 |
-
num_channels,
|
632 |
-
kernel_size,
|
633 |
-
padding=kernel_size // 2)
|
634 |
-
else:
|
635 |
-
if num_channels is None:
|
636 |
-
layer = InterpolateModule(
|
637 |
-
scale_factor=-kernel_size,
|
638 |
-
mode='bilinear',
|
639 |
-
align_corners=False)
|
640 |
-
else:
|
641 |
-
layer = nn.ConvTranspose2d(
|
642 |
-
in_channels,
|
643 |
-
num_channels,
|
644 |
-
-kernel_size,
|
645 |
-
padding=kernel_size // 2)
|
646 |
-
protonets.append(layer)
|
647 |
-
protonets.append(nn.ReLU(inplace=True))
|
648 |
-
in_channels = num_channels if num_channels is not None \
|
649 |
-
else in_channels
|
650 |
-
if not self.include_last_relu:
|
651 |
-
protonets = protonets[:-1]
|
652 |
-
return nn.Sequential(*protonets)
|
653 |
-
|
654 |
-
def init_weights(self):
|
655 |
-
"""Initialize weights of the head."""
|
656 |
-
for m in self.protonet:
|
657 |
-
if isinstance(m, nn.Conv2d):
|
658 |
-
xavier_init(m, distribution='uniform')
|
659 |
-
|
660 |
-
def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None):
|
661 |
-
"""Forward feature from the upstream network to get prototypes and
|
662 |
-
linearly combine the prototypes, using masks coefficients, into
|
663 |
-
instance masks. Finally, crop the instance masks with given bboxes.
|
664 |
-
|
665 |
-
Args:
|
666 |
-
x (Tensor): Feature from the upstream network, which is
|
667 |
-
a 4D-tensor.
|
668 |
-
coeff_pred (list[Tensor]): Mask coefficients for each scale
|
669 |
-
level with shape (N, num_anchors * num_protos, H, W).
|
670 |
-
bboxes (list[Tensor]): Box used for cropping with shape
|
671 |
-
(N, num_anchors * 4, H, W). During training, they are
|
672 |
-
ground truth boxes. During testing, they are predicted
|
673 |
-
boxes.
|
674 |
-
img_meta (list[dict]): Meta information of each image, e.g.,
|
675 |
-
image size, scaling factor, etc.
|
676 |
-
sampling_results (List[:obj:``SamplingResult``]): Sampler results
|
677 |
-
for each image.
|
678 |
-
|
679 |
-
Returns:
|
680 |
-
list[Tensor]: Predicted instance segmentation masks.
|
681 |
-
"""
|
682 |
-
prototypes = self.protonet(x)
|
683 |
-
prototypes = prototypes.permute(0, 2, 3, 1).contiguous()
|
684 |
-
|
685 |
-
num_imgs = x.size(0)
|
686 |
-
# Training state
|
687 |
-
if self.training:
|
688 |
-
coeff_pred_list = []
|
689 |
-
for coeff_pred_per_level in coeff_pred:
|
690 |
-
coeff_pred_per_level = \
|
691 |
-
coeff_pred_per_level.permute(0, 2, 3, 1)\
|
692 |
-
.reshape(num_imgs, -1, self.num_protos)
|
693 |
-
coeff_pred_list.append(coeff_pred_per_level)
|
694 |
-
coeff_pred = torch.cat(coeff_pred_list, dim=1)
|
695 |
-
|
696 |
-
mask_pred_list = []
|
697 |
-
for idx in range(num_imgs):
|
698 |
-
cur_prototypes = prototypes[idx]
|
699 |
-
cur_coeff_pred = coeff_pred[idx]
|
700 |
-
cur_bboxes = bboxes[idx]
|
701 |
-
cur_img_meta = img_meta[idx]
|
702 |
-
|
703 |
-
# Testing state
|
704 |
-
if not self.training:
|
705 |
-
bboxes_for_cropping = cur_bboxes
|
706 |
-
else:
|
707 |
-
cur_sampling_results = sampling_results[idx]
|
708 |
-
pos_assigned_gt_inds = \
|
709 |
-
cur_sampling_results.pos_assigned_gt_inds
|
710 |
-
bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone()
|
711 |
-
pos_inds = cur_sampling_results.pos_inds
|
712 |
-
cur_coeff_pred = cur_coeff_pred[pos_inds]
|
713 |
-
|
714 |
-
# Linearly combine the prototypes with the mask coefficients
|
715 |
-
mask_pred = cur_prototypes @ cur_coeff_pred.t()
|
716 |
-
mask_pred = torch.sigmoid(mask_pred)
|
717 |
-
|
718 |
-
h, w = cur_img_meta['img_shape'][:2]
|
719 |
-
bboxes_for_cropping[:, 0] /= w
|
720 |
-
bboxes_for_cropping[:, 1] /= h
|
721 |
-
bboxes_for_cropping[:, 2] /= w
|
722 |
-
bboxes_for_cropping[:, 3] /= h
|
723 |
-
|
724 |
-
mask_pred = self.crop(mask_pred, bboxes_for_cropping)
|
725 |
-
mask_pred = mask_pred.permute(2, 0, 1).contiguous()
|
726 |
-
mask_pred_list.append(mask_pred)
|
727 |
-
return mask_pred_list
|
728 |
-
|
729 |
-
@force_fp32(apply_to=('mask_pred', ))
|
730 |
-
def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):
|
731 |
-
"""Compute loss of the head.
|
732 |
-
|
733 |
-
Args:
|
734 |
-
mask_pred (list[Tensor]): Predicted prototypes with shape
|
735 |
-
(num_classes, H, W).
|
736 |
-
gt_masks (list[Tensor]): Ground truth masks for each image with
|
737 |
-
the same shape of the input image.
|
738 |
-
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
|
739 |
-
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
|
740 |
-
img_meta (list[dict]): Meta information of each image, e.g.,
|
741 |
-
image size, scaling factor, etc.
|
742 |
-
sampling_results (List[:obj:``SamplingResult``]): Sampler results
|
743 |
-
for each image.
|
744 |
-
|
745 |
-
Returns:
|
746 |
-
dict[str, Tensor]: A dictionary of loss components.
|
747 |
-
"""
|
748 |
-
loss_mask = []
|
749 |
-
num_imgs = len(mask_pred)
|
750 |
-
total_pos = 0
|
751 |
-
for idx in range(num_imgs):
|
752 |
-
cur_mask_pred = mask_pred[idx]
|
753 |
-
cur_gt_masks = gt_masks[idx].float()
|
754 |
-
cur_gt_bboxes = gt_bboxes[idx]
|
755 |
-
cur_img_meta = img_meta[idx]
|
756 |
-
cur_sampling_results = sampling_results[idx]
|
757 |
-
|
758 |
-
pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds
|
759 |
-
num_pos = pos_assigned_gt_inds.size(0)
|
760 |
-
# Since we're producing (near) full image masks,
|
761 |
-
# it'd take too much vram to backprop on every single mask.
|
762 |
-
# Thus we select only a subset.
|
763 |
-
if num_pos > self.max_masks_to_train:
|
764 |
-
perm = torch.randperm(num_pos)
|
765 |
-
select = perm[:self.max_masks_to_train]
|
766 |
-
cur_mask_pred = cur_mask_pred[select]
|
767 |
-
pos_assigned_gt_inds = pos_assigned_gt_inds[select]
|
768 |
-
num_pos = self.max_masks_to_train
|
769 |
-
total_pos += num_pos
|
770 |
-
|
771 |
-
gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]
|
772 |
-
|
773 |
-
mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,
|
774 |
-
pos_assigned_gt_inds)
|
775 |
-
if num_pos == 0:
|
776 |
-
loss = cur_mask_pred.sum() * 0.
|
777 |
-
elif mask_targets is None:
|
778 |
-
loss = F.binary_cross_entropy(cur_mask_pred,
|
779 |
-
torch.zeros_like(cur_mask_pred),
|
780 |
-
torch.zeros_like(cur_mask_pred))
|
781 |
-
else:
|
782 |
-
cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)
|
783 |
-
loss = F.binary_cross_entropy(
|
784 |
-
cur_mask_pred, mask_targets,
|
785 |
-
reduction='none') * self.loss_mask_weight
|
786 |
-
|
787 |
-
h, w = cur_img_meta['img_shape'][:2]
|
788 |
-
gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -
|
789 |
-
gt_bboxes_for_reweight[:, 0]) / w
|
790 |
-
gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -
|
791 |
-
gt_bboxes_for_reweight[:, 1]) / h
|
792 |
-
loss = loss.mean(dim=(1,
|
793 |
-
2)) / gt_bboxes_width / gt_bboxes_height
|
794 |
-
loss = torch.sum(loss)
|
795 |
-
loss_mask.append(loss)
|
796 |
-
|
797 |
-
if total_pos == 0:
|
798 |
-
total_pos += 1 # avoid nan
|
799 |
-
loss_mask = [x / total_pos for x in loss_mask]
|
800 |
-
|
801 |
-
return dict(loss_mask=loss_mask)
|
802 |
-
|
803 |
-
def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
|
804 |
-
"""Compute instance segmentation targets for each image.
|
805 |
-
|
806 |
-
Args:
|
807 |
-
mask_pred (Tensor): Predicted prototypes with shape
|
808 |
-
(num_classes, H, W).
|
809 |
-
gt_masks (Tensor): Ground truth masks for each image with
|
810 |
-
the same shape as the input image.
|
811 |
-
pos_assigned_gt_inds (Tensor): GT indices of the corresponding
|
812 |
-
positive samples.
|
813 |
-
Returns:
|
814 |
-
Tensor: Instance segmentation targets with shape
|
815 |
-
(num_instances, H, W).
|
816 |
-
"""
|
817 |
-
if gt_masks.size(0) == 0:
|
818 |
-
return None
|
819 |
-
mask_h, mask_w = mask_pred.shape[-2:]
|
820 |
-
gt_masks = F.interpolate(
|
821 |
-
gt_masks.unsqueeze(0), (mask_h, mask_w),
|
822 |
-
mode='bilinear',
|
823 |
-
align_corners=False).squeeze(0)
|
824 |
-
gt_masks = gt_masks.gt(0.5).float()
|
825 |
-
mask_targets = gt_masks[pos_assigned_gt_inds]
|
826 |
-
return mask_targets
|
827 |
-
|
828 |
-
def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
|
829 |
-
"""Resize, binarize, and format the instance mask predictions.
|
830 |
-
|
831 |
-
Args:
|
832 |
-
mask_pred (Tensor): shape (N, H, W).
|
833 |
-
label_pred (Tensor): shape (N, ).
|
834 |
-
img_meta (dict): Meta information of each image, e.g.,
|
835 |
-
image size, scaling factor, etc.
|
836 |
-
rescale (bool): If rescale is False, then returned masks will
|
837 |
-
fit the scale of imgs[0].
|
838 |
-
Returns:
|
839 |
-
list[ndarray]: Mask predictions grouped by their predicted classes.
|
840 |
-
"""
|
841 |
-
ori_shape = img_meta['ori_shape']
|
842 |
-
scale_factor = img_meta['scale_factor']
|
843 |
-
if rescale:
|
844 |
-
img_h, img_w = ori_shape[:2]
|
845 |
-
else:
|
846 |
-
img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32)
|
847 |
-
img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32)
|
848 |
-
|
849 |
-
cls_segms = [[] for _ in range(self.num_classes)]
|
850 |
-
if mask_pred.size(0) == 0:
|
851 |
-
return cls_segms
|
852 |
-
|
853 |
-
mask_pred = F.interpolate(
|
854 |
-
mask_pred.unsqueeze(0), (img_h, img_w),
|
855 |
-
mode='bilinear',
|
856 |
-
align_corners=False).squeeze(0) > 0.5
|
857 |
-
mask_pred = mask_pred.cpu().numpy().astype(np.uint8)
|
858 |
-
|
859 |
-
for m, l in zip(mask_pred, label_pred):
|
860 |
-
cls_segms[l].append(m)
|
861 |
-
return cls_segms
|
862 |
-
|
863 |
-
def crop(self, masks, boxes, padding=1):
|
864 |
-
"""Crop predicted masks by zeroing out everything not in the predicted
|
865 |
-
bbox.
|
866 |
-
|
867 |
-
Args:
|
868 |
-
masks (Tensor): shape [H, W, N].
|
869 |
-
boxes (Tensor): bbox coords in relative point form with
|
870 |
-
shape [N, 4].
|
871 |
-
|
872 |
-
Return:
|
873 |
-
Tensor: The cropped masks.
|
874 |
-
"""
|
875 |
-
h, w, n = masks.size()
|
876 |
-
x1, x2 = self.sanitize_coordinates(
|
877 |
-
boxes[:, 0], boxes[:, 2], w, padding, cast=False)
|
878 |
-
y1, y2 = self.sanitize_coordinates(
|
879 |
-
boxes[:, 1], boxes[:, 3], h, padding, cast=False)
|
880 |
-
|
881 |
-
rows = torch.arange(
|
882 |
-
w, device=masks.device, dtype=x1.dtype).view(1, -1,
|
883 |
-
1).expand(h, w, n)
|
884 |
-
cols = torch.arange(
|
885 |
-
h, device=masks.device, dtype=x1.dtype).view(-1, 1,
|
886 |
-
1).expand(h, w, n)
|
887 |
-
|
888 |
-
masks_left = rows >= x1.view(1, 1, -1)
|
889 |
-
masks_right = rows < x2.view(1, 1, -1)
|
890 |
-
masks_up = cols >= y1.view(1, 1, -1)
|
891 |
-
masks_down = cols < y2.view(1, 1, -1)
|
892 |
-
|
893 |
-
crop_mask = masks_left * masks_right * masks_up * masks_down
|
894 |
-
|
895 |
-
return masks * crop_mask.float()
|
896 |
-
|
897 |
-
def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
|
898 |
-
"""Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,
|
899 |
-
and x2 <= image_size. Also converts from relative to absolute
|
900 |
-
coordinates and casts the results to long tensors.
|
901 |
-
|
902 |
-
Warning: this does things in-place behind the scenes so
|
903 |
-
copy if necessary.
|
904 |
-
|
905 |
-
Args:
|
906 |
-
_x1 (Tensor): shape (N, ).
|
907 |
-
_x2 (Tensor): shape (N, ).
|
908 |
-
img_size (int): Size of the input image.
|
909 |
-
padding (int): x1 >= padding, x2 <= image_size-padding.
|
910 |
-
cast (bool): If cast is false, the result won't be cast to longs.
|
911 |
-
|
912 |
-
Returns:
|
913 |
-
tuple:
|
914 |
-
x1 (Tensor): Sanitized _x1.
|
915 |
-
x2 (Tensor): Sanitized _x2.
|
916 |
-
"""
|
917 |
-
x1 = x1 * img_size
|
918 |
-
x2 = x2 * img_size
|
919 |
-
if cast:
|
920 |
-
x1 = x1.long()
|
921 |
-
x2 = x2.long()
|
922 |
-
x1 = torch.min(x1, x2)
|
923 |
-
x2 = torch.max(x1, x2)
|
924 |
-
x1 = torch.clamp(x1 - padding, min=0)
|
925 |
-
x2 = torch.clamp(x2 + padding, max=img_size)
|
926 |
-
return x1, x2
|
927 |
-
|
928 |
-
|
929 |
-
class InterpolateModule(nn.Module):
|
930 |
-
"""This is a module version of F.interpolate.
|
931 |
-
|
932 |
-
Any arguments you give it just get passed along for the ride.
|
933 |
-
"""
|
934 |
-
|
935 |
-
def __init__(self, *args, **kwargs):
|
936 |
-
super().__init__()
|
937 |
-
|
938 |
-
self.args = args
|
939 |
-
self.kwargs = kwargs
|
940 |
-
|
941 |
-
def forward(self, x):
|
942 |
-
"""Forward features from the upstream network."""
|
943 |
-
return F.interpolate(x, *self.args, **self.kwargs)
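For reference, the mask assembly that YOLACTProtonet.forward and crop implement above reduces to a few lines: prototypes are linearly combined with per-box coefficients, squashed with a sigmoid, and zeroed outside each (normalized) box. The sketch below is a hedged, self-contained restatement of that logic, not the deleted module itself; the shapes and the helper name assemble_masks are illustrative.

import torch

def assemble_masks(prototypes, coeffs, boxes):
    """prototypes: (H, W, P); coeffs: (N, P); boxes: (N, 4), coords in [0, 1]."""
    # Linear combination of prototypes with per-box coefficients, then sigmoid.
    masks = torch.sigmoid(prototypes @ coeffs.t())            # (H, W, N)
    h, w, n = masks.shape
    # Scale normalized box coords to pixel units of the prototype map.
    x1, y1, x2, y2 = (boxes * boxes.new_tensor([w, h, w, h])).unbind(dim=1)
    rows = torch.arange(w, device=masks.device).view(1, -1, 1).expand(h, w, n)
    cols = torch.arange(h, device=masks.device).view(-1, 1, 1).expand(h, w, n)
    # Zero out everything that falls outside each predicted box.
    keep = ((rows >= x1) & (rows < x2) & (cols >= y1) & (cols < y2)).float()
    return masks * keep

# Toy usage with random prototypes/coefficients and five identical boxes.
masks = assemble_masks(torch.rand(138, 138, 32),
                       torch.rand(5, 32) * 2 - 1,
                       torch.tensor([[0.1, 0.1, 0.5, 0.6]] * 5))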
spaces/CVPR/WALT/mmdet/models/detectors/faster_rcnn.py
DELETED
@@ -1,24 +0,0 @@
-from ..builder import DETECTORS
-from .two_stage import TwoStageDetector
-
-
-@DETECTORS.register_module()
-class FasterRCNN(TwoStageDetector):
-    """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
-
-    def __init__(self,
-                 backbone,
-                 rpn_head,
-                 roi_head,
-                 train_cfg,
-                 test_cfg,
-                 neck=None,
-                 pretrained=None):
-        super(FasterRCNN, self).__init__(
-            backbone=backbone,
-            neck=neck,
-            rpn_head=rpn_head,
-            roi_head=roi_head,
-            train_cfg=train_cfg,
-            test_cfg=test_cfg,
-            pretrained=pretrained)
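Because the class registers itself with the DETECTORS registry, it is normally built from a config rather than imported directly. The lines below are a hedged usage sketch in the mmdet 2.x style; the config path is a placeholder and not part of this commit.

from mmcv import Config
from mmdet.models import build_detector

# Placeholder config path; any config whose model type is 'FasterRCNN' works.
cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # FasterRCNN, resolved via @DETECTORS.register_module()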
spaces/CVPR/regionclip-demo/detectron2/modeling/meta_arch/clip_rcnn.py
DELETED
@@ -1,1560 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import logging
|
3 |
-
import numpy as np
|
4 |
-
from typing import Dict, List, Optional, Tuple
|
5 |
-
from numpy.lib import pad
|
6 |
-
import torch
|
7 |
-
from torch import nn
|
8 |
-
from torch.nn import functional as F
|
9 |
-
from random import randint
|
10 |
-
|
11 |
-
from detectron2.config import configurable
|
12 |
-
from detectron2.data.detection_utils import convert_image_to_rgb
|
13 |
-
from detectron2.structures import ImageList, Instances, Boxes
|
14 |
-
from detectron2.utils.events import get_event_storage
|
15 |
-
from detectron2.utils.logger import log_first_n
|
16 |
-
|
17 |
-
from ..backbone import Backbone, build_backbone, build_text_backbone
|
18 |
-
from ..postprocessing import detector_postprocess
|
19 |
-
from ..proposal_generator import build_proposal_generator
|
20 |
-
from ..roi_heads import build_roi_heads
|
21 |
-
from .build import META_ARCH_REGISTRY
|
22 |
-
|
23 |
-
from PIL import Image
|
24 |
-
import torchvision
|
25 |
-
from torchvision.transforms import Resize, CenterCrop
|
26 |
-
from detectron2.data.datasets.clip_prompt_utils import get_cls_names, pre_tokenize
|
27 |
-
import copy
|
28 |
-
from ..backbone.fpn import build_resnet_fpn_backbone
|
29 |
-
from ..roi_heads.fast_rcnn import fast_rcnn_inference
|
30 |
-
from detectron2.layers import ShapeSpec
|
31 |
-
from ..backbone.clip_backbone import build_clip_language_encoder
|
32 |
-
from detectron2.utils.comm import gather_tensors, MILCrossEntropy, SoftTargetCrossEntropy
|
33 |
-
|
34 |
-
__all__ = ["CLIPRCNN", "CLIPFastRCNN", "PretrainFastRCNN"]
|
35 |
-
|
36 |
-
@META_ARCH_REGISTRY.register()
|
37 |
-
class CLIPRCNN(nn.Module):
|
38 |
-
"""
|
39 |
-
CLIP in R-CNN format.
|
40 |
-
It takes the image regions as inputs and classifies each image.
|
41 |
-
It contains the following two components:
|
42 |
-
1. Per-image feature extraction (visual encoder)
|
43 |
-
2. Per-image prediction (text-based classifier)
|
44 |
-
"""
|
45 |
-
@configurable
|
46 |
-
def __init__(
|
47 |
-
self,
|
48 |
-
*,
|
49 |
-
clip: Backbone,
|
50 |
-
offline_backbone: Backbone,
|
51 |
-
offline_proposal_generator: nn.Module,
|
52 |
-
roi_heads: nn.Module,
|
53 |
-
pixel_mean: Tuple[float],
|
54 |
-
pixel_std: Tuple[float],
|
55 |
-
input_format: Optional[str] = None,
|
56 |
-
vis_period: int = 0,
|
57 |
-
clip_crop_region_type: str = 'GT',
|
58 |
-
test_score_thresh: float = 0.0001,
|
59 |
-
test_nms_thresh: float = 0.5,
|
60 |
-
test_topk_per_image: float = 300,
|
61 |
-
):
|
62 |
-
"""
|
63 |
-
Args:
|
64 |
-
backbone: a backbone module, must follow detectron2's backbone interface
|
65 |
-
proposal_generator: a module that generates proposals using backbone features
|
66 |
-
roi_heads: a ROI head that performs per-region computation
|
67 |
-
pixel_mean, pixel_std: list or tuple with #channels element, representing
|
68 |
-
the per-channel mean and std to be used to normalize the input image
|
69 |
-
input_format: describe the meaning of channels of input. Needed by visualization
|
70 |
-
vis_period: the period to run visualization. Set to 0 to disable.
|
71 |
-
"""
|
72 |
-
super().__init__()
|
73 |
-
self.clip_backbone = clip
|
74 |
-
self.offline_backbone = offline_backbone
|
75 |
-
self.offline_proposal_generator = offline_proposal_generator
|
76 |
-
self.roi_heads = roi_heads
|
77 |
-
|
78 |
-
self.input_format = input_format
|
79 |
-
self.vis_period = vis_period
|
80 |
-
if vis_period > 0:
|
81 |
-
assert input_format is not None, "input_format is required for visualization!"
|
82 |
-
|
83 |
-
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
|
84 |
-
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
|
85 |
-
assert (
|
86 |
-
self.pixel_mean.shape == self.pixel_std.shape
|
87 |
-
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
|
88 |
-
# Detectron2 default pixel mean and std
|
89 |
-
self.register_buffer("detectron_pixel_mean", torch.tensor([103.530, 116.280, 123.675]).view(-1, 1, 1), False)
|
90 |
-
self.register_buffer("detectron_pixel_std", torch.tensor([1.0, 1.0, 1.0]).view(-1, 1, 1), False)
|
91 |
-
|
92 |
-
# CLIP image loading
|
93 |
-
if np.sum(pixel_mean) < 3.0: # convert pixel value to range [0.0, 1.0] by dividing 255.0
|
94 |
-
assert input_format == 'RGB'
|
95 |
-
self.div_pixel = True
|
96 |
-
else: # default setting
|
97 |
-
self.div_pixel = False
|
98 |
-
n_px = 224
|
99 |
-
self.clip_resize = Resize(n_px, interpolation=Image.BICUBIC) # shorter side becomes n_px
|
100 |
-
self.clip_center_crop = CenterCrop(n_px) # crop image into n_px * n_px at the center
|
101 |
-
self.region_crop_scales = (1.0, 1.5) # (1.0, 2.0) # (1.0, 1.2) # (1.0,) #
|
102 |
-
|
103 |
-
# CLIP text prompt loading
|
104 |
-
print("Working on pre_tokenize...")
|
105 |
-
cls_names = get_cls_names(filter_novel=False, from_file='/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/trained_models/concept_pool/googlecc_nouns_filtered_100.txt') # filter_novel=True; coco='all', coco='base', coco='target'; from_file: a file path for concept pool
|
106 |
-
# from_file='/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/trained_models/concept_pool/googlecc_nouns_triplet_parser_filtered_100.txt'
|
107 |
-
print("Got {} class names: {}\n {} class names in total.".format(len(cls_names), cls_names, len(cls_names)))
|
108 |
-
input_ids = pre_tokenize(cls_names)
|
109 |
-
self.num_cls = input_ids.size(0)
|
110 |
-
self.num_prompt = input_ids.size(1)
|
111 |
-
self.input_ids_flat = input_ids.view(-1, input_ids.size(2)) # [#cls*#prompts, #context_length]
|
112 |
-
self.clss_emb_all = None
|
113 |
-
|
114 |
-
# CLIP crop image configs
|
115 |
-
self.clip_crop_region_type = clip_crop_region_type
|
116 |
-
self.test_score_thresh = test_score_thresh
|
117 |
-
self.test_nms_thresh = test_nms_thresh
|
118 |
-
self.test_topk_per_image = test_topk_per_image
|
119 |
-
|
120 |
-
@classmethod
|
121 |
-
def from_config(cls, cfg):
|
122 |
-
if cfg.MODEL.CLIP.CROP_REGION_TYPE == "RPN":
|
123 |
-
offline_backbone = build_resnet_fpn_backbone(cfg, ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))) # build_backbone(cfg)
|
124 |
-
offline_rpn = build_proposal_generator(cfg, offline_backbone.output_shape())
|
125 |
-
roi_heads = None # build_roi_heads(cfg, backbone.output_shape()),
|
126 |
-
elif cfg.MODEL.CLIP.CROP_REGION_TYPE == "GT":
|
127 |
-
offline_backbone = None
|
128 |
-
offline_rpn = None
|
129 |
-
roi_heads = None
|
130 |
-
clip = build_backbone(cfg)
|
131 |
-
return {
|
132 |
-
"clip": clip,
|
133 |
-
"offline_backbone": offline_backbone,
|
134 |
-
"offline_proposal_generator": offline_rpn,
|
135 |
-
"roi_heads": roi_heads,
|
136 |
-
"input_format": cfg.INPUT.FORMAT,
|
137 |
-
"vis_period": cfg.VIS_PERIOD,
|
138 |
-
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
|
139 |
-
"pixel_std": cfg.MODEL.PIXEL_STD,
|
140 |
-
"clip_crop_region_type" : cfg.MODEL.CLIP.CROP_REGION_TYPE,
|
141 |
-
"test_score_thresh" : cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST,
|
142 |
-
"test_nms_thresh" : cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST,
|
143 |
-
"test_topk_per_image" : cfg.TEST.DETECTIONS_PER_IMAGE,
|
144 |
-
}
|
145 |
-
|
146 |
-
@property
|
147 |
-
def device(self):
|
148 |
-
return self.pixel_mean.device
|
149 |
-
|
150 |
-
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
151 |
-
"""
|
152 |
-
Args:
|
153 |
-
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
|
154 |
-
Each item in the list contains the inputs for one image.
|
155 |
-
For now, each item in the list is a dict that contains:
|
156 |
-
|
157 |
-
* image: Tensor, image in (C, H, W) format.
|
158 |
-
* instances (optional): groundtruth :class:`Instances`
|
159 |
-
* proposals (optional): :class:`Instances`, precomputed proposals.
|
160 |
-
|
161 |
-
Other information that's included in the original dicts, such as:
|
162 |
-
|
163 |
-
* "height", "width" (int): the output resolution of the model, used in inference.
|
164 |
-
See :meth:`postprocess` for details.
|
165 |
-
|
166 |
-
Returns:
|
167 |
-
list[dict]:
|
168 |
-
Each dict is the output for one input image.
|
169 |
-
The dict contains one key "instances" whose value is a :class:`Instances`.
|
170 |
-
The :class:`Instances` object has the following keys:
|
171 |
-
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
|
172 |
-
"""
|
173 |
-
if not self.training:
|
174 |
-
return self.inference(batched_inputs)
|
175 |
-
# No training mode for this arch
|
176 |
-
|
177 |
-
def inference(
|
178 |
-
self,
|
179 |
-
batched_inputs: List[Dict[str, torch.Tensor]],
|
180 |
-
detected_instances: Optional[List[Instances]] = None,
|
181 |
-
do_postprocess: bool = True,
|
182 |
-
):
|
183 |
-
"""
|
184 |
-
Run inference on the given inputs.
|
185 |
-
|
186 |
-
Args:
|
187 |
-
batched_inputs (list[dict]): same as in :meth:`forward`
|
188 |
-
detected_instances (None or list[Instances]): if not None, it
|
189 |
-
contains an `Instances` object per image. The `Instances`
|
190 |
-
object contains "pred_boxes" and "pred_classes" which are
|
191 |
-
known boxes in the image.
|
192 |
-
The inference will then skip the detection of bounding boxes,
|
193 |
-
and only predict other per-ROI outputs.
|
194 |
-
do_postprocess (bool): whether to apply post-processing on the outputs.
|
195 |
-
|
196 |
-
Returns:
|
197 |
-
When do_postprocess=True, same as in :meth:`forward`.
|
198 |
-
Otherwise, a list[Instances] containing raw network outputs.
|
199 |
-
"""
|
200 |
-
assert not self.training
|
201 |
-
|
202 |
-
# get the label prompt, and use CLIP.encode_text() to compute text emb only once
|
203 |
-
if self.clss_emb_all is None: # compute only once
|
204 |
-
num_instances = self.input_ids_flat.size(0)
|
205 |
-
per_split = 1000
|
206 |
-
num_splits = num_instances // per_split
|
207 |
-
input_ids_flat = self.input_ids_flat.to(self.device)
|
208 |
-
#self.clss_emb_all = torch.ones((1203, 512)).to(self.device)
|
209 |
-
clss_emb_all = []
|
210 |
-
for i in range(num_splits+1):
|
211 |
-
if i < num_splits:
|
212 |
-
clss_emb_i = self.clip_backbone.encode_text(input_ids_flat[per_split*i:per_split*(i+1)]) # per_split x D
|
213 |
-
else:
|
214 |
-
clss_emb_i = self.clip_backbone.encode_text(input_ids_flat[per_split*i:]) # per_split x D
|
215 |
-
# clss_emb_i = clip_model.encode_label(torch.arange(0, 1000).view(-1, 1).long().to(device)) # per_split x D
|
216 |
-
clss_emb_all.append(clss_emb_i)
|
217 |
-
self.clss_emb_all = torch.cat(clss_emb_all, 0).view(self.num_cls, self.num_prompt, -1) # [#cls, #prompts, D]
|
218 |
-
self.clss_emb_all = self.clss_emb_all.mean(1) # ensemble different prompts for each class
|
219 |
-
# torch.save(self.clss_emb_all.cpu(), "/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/trained_models/lvis_cls_emb/coco_17_target_cls_emb_notnorm_rn50x4.pth")
|
220 |
-
self.clss_emb_all = F.normalize(self.clss_emb_all, p=2.0, dim=1) # [#cls, emb_dim]
|
221 |
-
else:
|
222 |
-
assert self.clss_emb_all.device == self.device
|
223 |
-
|
224 |
-
# get the region proposals, from the backbone & RPN of standard Mask-RCNN, trained on base classes
|
225 |
-
if self.clip_crop_region_type == "GT":
|
226 |
-
proposals = None
|
227 |
-
elif self.clip_crop_region_type == "RPN":
|
228 |
-
images = self.preprocess_image(batched_inputs)
|
229 |
-
features = self.offline_backbone(images.tensor)
|
230 |
-
if detected_instances is None:
|
231 |
-
if self.offline_proposal_generator is not None:
|
232 |
-
proposals, _ = self.offline_proposal_generator(images, features, None)
|
233 |
-
|
234 |
-
# crop image regions, and use CLIP.encode_image() to get the visual feature
|
235 |
-
images, bbs, num_bbs = self.preprocess_image_crop(batched_inputs, rpn_proposals=proposals)
|
236 |
-
img_emb = self.clip_backbone.encode_image(images.tensor)
|
237 |
-
img_emb = img_emb.view(-1, len(self.region_crop_scales), img_emb.size(1))
|
238 |
-
img_emb = torch.sum(img_emb, dim=1) # ensemble different scales for each region
|
239 |
-
img_emb = F.normalize(img_emb, p=2.0, dim=1)
|
240 |
-
|
241 |
-
# cosine similarity as logits
|
242 |
-
all_scores = torch.mm(img_emb, self.clss_emb_all.T)
|
243 |
-
all_scores = F.softmax(all_scores, dim=-1)
|
244 |
-
scores, pred_cls = torch.max(all_scores, dim=-1) # Note: [0, #cls-1] representing the categories. The value #cls represents "background".
|
245 |
-
|
246 |
-
# convert model outputs into regular output result format
|
247 |
-
scores_per_img = scores.split(num_bbs)
|
248 |
-
pred_cls_per_img = pred_cls.split(num_bbs)
|
249 |
-
all_scores_per_img = all_scores.split(num_bbs)
|
250 |
-
|
251 |
-
# per-class NMS
|
252 |
-
if self.clip_crop_region_type == "GT":
|
253 |
-
image_shapes = [x['instances']._image_size for x in batched_inputs]
|
254 |
-
bbs = [bb.to(self.device) for bb in bbs]
|
255 |
-
pred_instances, _ = fast_rcnn_inference(bbs, all_scores_per_img, image_shapes, \
|
256 |
-
self.test_score_thresh, self.test_nms_thresh, self.test_topk_per_image)
|
257 |
-
results = pred_instances
|
258 |
-
|
259 |
-
# results = []
|
260 |
-
# for r_i, (b_input, bb, sc, prd) in enumerate(zip(batched_inputs, bbs, scores_per_img, pred_cls_per_img)):
|
261 |
-
# this_result = copy.deepcopy(b_input["instances"]) # Instance
|
262 |
-
# if self.clip_crop_region_type == "GT":
|
263 |
-
# result_boxes = this_result._fields['gt_boxes'].to(self.device)
|
264 |
-
# elif self.clip_crop_region_type == "RPN": # directly use RPN boxes without per-class NMS
|
265 |
-
# result_boxes = bb # result_boxes = Boxes(bb)
|
266 |
-
# this_result._fields = {'pred_boxes': result_boxes, 'scores': sc, 'pred_classes': prd}
|
267 |
-
# results.append(this_result)
|
268 |
-
|
269 |
-
# sanity check: GT boxes + GT classes
|
270 |
-
# results = []
|
271 |
-
# for b_input in batched_inputs:
|
272 |
-
# this_result = copy.deepcopy(b_input["instances"]) # Instance
|
273 |
-
# gt_boxes = this_result._fields['gt_boxes'].to(self.device)
|
274 |
-
# gt_cls = this_result._fields['gt_classes'].to(self.device)
|
275 |
-
# this_result._fields = {'pred_boxes': gt_boxes, 'scores': torch.ones(gt_cls.size(0)).to(self.device), 'pred_classes': gt_cls}
|
276 |
-
# #this_result._fields = {'pred_boxes': gt_boxes, 'scores': sc, 'pred_classes': prd}
|
277 |
-
# results.append(this_result)
|
278 |
-
elif self.clip_crop_region_type == "RPN":
|
279 |
-
image_shapes = [x.image_size for x in proposals]
|
280 |
-
pred_instances, _ = fast_rcnn_inference(bbs, all_scores_per_img, image_shapes, \
|
281 |
-
self.test_score_thresh, self.test_nms_thresh, self.test_topk_per_image)
|
282 |
-
results = pred_instances
|
283 |
-
|
284 |
-
if do_postprocess:
|
285 |
-
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
|
286 |
-
return CLIPRCNN._postprocess(results, batched_inputs)
|
287 |
-
else:
|
288 |
-
return results
|
289 |
-
|
290 |
-
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
291 |
-
"""
|
292 |
-
Normalize, pad and batch the input images. Use detectron2 default processing (pixel mean & std).
|
293 |
-
Note: Due to FPN size_divisibility, images are padded by right/bottom border. So FPN is consistent with C4 and GT boxes.
|
294 |
-
"""
|
295 |
-
images = [x["image"].to(self.device) for x in batched_inputs]
|
296 |
-
images = [(x - self.detectron_pixel_mean) / self.detectron_pixel_std for x in images]
|
297 |
-
images = ImageList.from_tensors(images, self.offline_backbone.size_divisibility)
|
298 |
-
return images
|
299 |
-
|
300 |
-
def preprocess_image_crop(self, batched_inputs: List[Dict[str, torch.Tensor]], rpn_proposals=None, max_num_rpn=1000):
|
301 |
-
"""
|
302 |
-
Crop image regions based on GT or RPN boxes with different scales.
|
303 |
-
Then apply CLIP transformation: resizing / cropping the regions into square shape (224 * 224).
|
304 |
-
Followed by the default preprocessing in Detectron2 as follows.
|
305 |
-
Normalize, pad and batch the input images.
|
306 |
-
"""
|
307 |
-
def clip_crop_region(image, box, scales=(1.0, 1.5)):
|
308 |
-
"""Crop image regions based on given boxes. Return different scales of region crops. (3 hrs)"""
|
309 |
-
img_h, img_w = image.size(1), image.size(2)
|
310 |
-
x1, y1, x2, y2 = list(box)
|
311 |
-
assert x1 < x2 and y1 < y2 and x2 < (img_w + 1) and y2 < (img_h + 1)
|
312 |
-
x_center = (x1 + x2) / 2.0
|
313 |
-
y_center = (y1 + y2) / 2.0
|
314 |
-
half_w = x_center - x1
|
315 |
-
half_h = y_center - y1
|
316 |
-
regions = []
|
317 |
-
for scale in scales: # get region coordinates
|
318 |
-
r_y1 = int(max(0, (y_center - half_h * scale).item()))
|
319 |
-
r_y2 = int(min(img_h, (y_center + half_h * scale).item()))
|
320 |
-
r_x1 = int(max(0, (x_center - half_w * scale).item()))
|
321 |
-
r_x2 = int(min(img_w, (x_center + half_w * scale).item()))
|
322 |
-
# sanity check
|
323 |
-
if r_y2 - r_y1 <= 1:
|
324 |
-
r_y2 = int(min(img_h, r_y2 + 2))
|
325 |
-
if r_y2 - r_y1 <= 1:
|
326 |
-
r_y1 = int(max(0, r_y1 - 2))
|
327 |
-
if r_x2 - r_x1 <= 1:
|
328 |
-
r_x2 = int(min(img_w, r_x2 + 2))
|
329 |
-
if r_x2 - r_x1 <= 1:
|
330 |
-
r_x1 = int(max(0, r_x1 - 2))
|
331 |
-
regions.append(image[:, r_y1:r_y2, r_x1:r_x2])
|
332 |
-
return regions
|
333 |
-
|
334 |
-
def clip_square_crop(image, box, scales=(1.0,)):
|
335 |
-
"""Crop image regions based on given boxes. Ensure square region as much as possible. (1.75 hrs)"""
|
336 |
-
img_h, img_w = image.size(1), image.size(2)
|
337 |
-
x1, y1, x2, y2 = list(box)
|
338 |
-
assert x1 < x2 and y1 < y2 and x2 < (img_w + 1) and y2 < (img_h + 1)
|
339 |
-
x_center = (x1 + x2) / 2.0
|
340 |
-
y_center = (y1 + y2) / 2.0
|
341 |
-
half_w = x_center - x1
|
342 |
-
half_h = y_center - y1
|
343 |
-
square_side = max(half_w, half_h)
|
344 |
-
half_w = square_side
|
345 |
-
half_h = square_side
|
346 |
-
regions = []
|
347 |
-
for scale in scales: # get region coordinates
|
348 |
-
if square_side * square_side < 2500: # crop larger context area for tiny objects
|
349 |
-
scale = 1.5 if scale == 1.0 else 4.0
|
350 |
-
# elif square_side * square_side > 90000: # crop exact area for large objects
|
351 |
-
# scale = 1.0 if scale == 1.0 else 1.1
|
352 |
-
r_y1 = int(max(0, (y_center - half_h * scale).item()))
|
353 |
-
r_y2 = int(min(img_h, (y_center + half_h * scale).item()))
|
354 |
-
r_x1 = int(max(0, (x_center - half_w * scale).item()))
|
355 |
-
r_x2 = int(min(img_w, (x_center + half_w * scale).item()))
|
356 |
-
# sanity check
|
357 |
-
if r_y2 - r_y1 <= 1:
|
358 |
-
r_y2 = int(min(img_h, r_y2 + 2))
|
359 |
-
if r_y2 - r_y1 <= 1:
|
360 |
-
r_y1 = int(max(0, r_y1 - 2))
|
361 |
-
if r_x2 - r_x1 <= 1:
|
362 |
-
r_x2 = int(min(img_w, r_x2 + 2))
|
363 |
-
if r_x2 - r_x1 <= 1:
|
364 |
-
r_x1 = int(max(0, r_x1 - 2))
|
365 |
-
#regions.append(image[:, r_y1:r_y2, r_x1:r_x2])
|
366 |
-
# if the cropped image isn't square (due to image boundaries), pad the cropped region
|
367 |
-
crop_image = image[:, r_y1:r_y2, r_x1:r_x2]
|
368 |
-
r_h, r_w = crop_image.size(1), crop_image.size(2)
|
369 |
-
pad_image = torch.zeros((3, int(2 * half_h.item() * scale) + 4, int(2 * half_w.item() * scale) + 4))  # .fill_(torch.mean(crop_image.float()))
|
370 |
-
p_h, p_w = pad_image.size(1), pad_image.size(2)
|
371 |
-
pad_image[:, int(((p_h - r_h) / 2)):int(((p_h - r_h) / 2 + r_h)), int(((p_w - r_w) / 2)):int(((p_w - r_w) / 2 + r_w))] = crop_image
|
372 |
-
regions.append(pad_image.type(torch.uint8))
|
373 |
-
return regions
|
374 |
-
|
375 |
-
def vis_crop(f_n, images):
|
376 |
-
"""visualize the crop regions to diagnose the accuracy."""
|
377 |
-
if f_n not in ['datasets/coco/train2017/000000008691.jpg']:
|
378 |
-
for p_i, pad_image in enumerate(images):
|
379 |
-
to_save = pad_image.permute(1, 2, 0).numpy()
|
380 |
-
to_save = Image.fromarray(np.array(to_save, np.uint8))
|
381 |
-
#to_save.save("output/regions/" + f_n.split("/")[-1].split(".")[0] + "-{}.png".format(p_i))
|
382 |
-
pass
|
383 |
-
|
384 |
-
# crop image region
|
385 |
-
images = []
|
386 |
-
bbs = []
|
387 |
-
num_bbs = []
|
388 |
-
for img_i, b_input in enumerate(batched_inputs):
|
389 |
-
this_img = b_input["image"]
|
390 |
-
if self.clip_crop_region_type == "GT":
|
391 |
-
this_boxes = b_input["instances"]._fields['gt_boxes'].tensor # variable number of boxes per image (e.g., up to 759), which might lead to OOM
|
392 |
-
elif self.clip_crop_region_type == "RPN":
|
393 |
-
this_boxes = rpn_proposals[img_i]._fields['proposal_boxes'].tensor[:max_num_rpn]
|
394 |
-
|
395 |
-
bbs.append(this_boxes)
|
396 |
-
num_bbs.append(this_boxes.size(0))
|
397 |
-
for this_box in this_boxes:
|
398 |
-
#images.extend(clip_crop_region(this_img, this_box, self.region_crop_scales))
|
399 |
-
images.extend(clip_square_crop(this_img, this_box, self.region_crop_scales))
|
400 |
-
#vis_crop(batched_inputs[0]['file_name'], images)
|
401 |
-
images = [self.clip_resize(x) for x in images]
|
402 |
-
images = [self.clip_center_crop(x) for x in images]
|
403 |
-
images = [x.to(self.device) for x in images]
|
404 |
-
if self.div_pixel:
|
405 |
-
images = [((x / 255.0) - self.pixel_mean) / self.pixel_std for x in images]
|
406 |
-
else:
|
407 |
-
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
|
408 |
-
images = ImageList.from_tensors(images, self.clip_backbone.size_divisibility) # batch images into single tensor by padding to same size
|
409 |
-
return images, bbs, num_bbs
|
410 |
-
|
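A small illustrative sketch (not part of the original file) of how the bookkeeping returned here can be consumed downstream: with len(scales) crops per box and num_bbs boxes per image, the flat batch of region embeddings can be folded back into per-image, per-box groups; region_embs and num_scales are hypothetical names.
import torch

def regroup_region_embeddings(region_embs, num_bbs, num_scales):
    # region_embs: [sum(num_bbs) * num_scales, D], ordered image by image, box by box, scale by scale
    per_image = torch.split(region_embs, [n * num_scales for n in num_bbs], dim=0)
    # fold the scale dimension so the crops of each box can be ensembled (e.g., summed)
    return [embs.view(n, num_scales, -1).sum(dim=1) for embs, n in zip(per_image, num_bbs)]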
411 |
-
@staticmethod
|
412 |
-
def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]]):
|
413 |
-
"""
|
414 |
-
Rescale the output instances to the target size.
|
415 |
-
"""
|
416 |
-
# note: private function; subject to changes
|
417 |
-
processed_results = []
|
418 |
-
for results_per_image, input_per_image in zip(
|
419 |
-
instances, batched_inputs):
|
420 |
-
height = input_per_image["height"] # original image size, before resizing
|
421 |
-
width = input_per_image["width"] # original image size, before resizing
|
422 |
-
r = detector_postprocess(results_per_image, height, width)
|
423 |
-
processed_results.append({"instances": r})
|
424 |
-
return processed_results
|
425 |
-
|
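Conceptually, detector_postprocess rescales the predicted boxes from the resized input back to the original resolution; a minimal sketch of that rescaling, assuming plain [N, 4] xyxy box tensors rather than an Instances object:
def rescale_boxes(boxes, resized_hw, original_hw):
    # boxes: [N, 4] in (x1, y1, x2, y2) coordinates on the resized image
    scale_x = original_hw[1] / resized_hw[1]
    scale_y = original_hw[0] / resized_hw[0]
    boxes = boxes.clone()
    boxes[:, 0::2] *= scale_x  # x1, x2
    boxes[:, 1::2] *= scale_y  # y1, y2
    # clamp to the original image boundaries
    boxes[:, 0::2] = boxes[:, 0::2].clamp(0, original_hw[1])
    boxes[:, 1::2] = boxes[:, 1::2].clamp(0, original_hw[0])
    return boxes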
426 |
-
def inference_on_cifar(self, pseudo_input):
|
427 |
-
""" Evaluate recoginition accuracy on CIFAR-10 for sanity check """
|
428 |
-
# get the label prompt, and use CLIP.encode_text() to compute text emb only once
|
429 |
-
cifar_cls_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
|
430 |
-
input_ids = pre_tokenize(cifar_cls_names)
|
431 |
-
num_cls = input_ids.size(0)
|
432 |
-
input_ids_flat = input_ids.view(-1, input_ids.size(2))
|
433 |
-
input_ids_flat = input_ids_flat.to(self.device)
|
434 |
-
|
435 |
-
clss_emb_all = self.clip_backbone.encode_text(input_ids_flat)
|
436 |
-
clss_emb_all = clss_emb_all.view(num_cls, self.num_prompt, -1)
|
437 |
-
clss_emb_all = clss_emb_all.mean(1)
|
438 |
-
clss_emb_all = F.normalize(clss_emb_all, p=2.0, dim=1) # [#cls, emb_dim]
|
439 |
-
|
440 |
-
# dataset loads images and labels
|
441 |
-
testset = torchvision.datasets.CIFAR10(root='./datasets', train=False,
|
442 |
-
download=False, transform=None)
|
443 |
-
# testloader = torch.utils.data.DataLoader(testset, batch_size=4,
|
444 |
-
# shuffle=False, num_workers=0)
|
445 |
-
|
446 |
-
# inference on each image and calculate accuracy
|
447 |
-
correct = 0
|
448 |
-
wrong = 0
|
449 |
-
for idx, inputs in enumerate(testset):
|
450 |
-
if idx % 1000 == 0:
|
451 |
-
print(idx)
|
452 |
-
# preprocess images
|
453 |
-
raw_image, label = inputs
|
454 |
-
image = np.array(raw_image) # [h, w, 3]
|
455 |
-
image = torch.from_numpy(image)
|
456 |
-
image = image.permute(2, 0, 1) # [3, h, w]
|
457 |
-
images = [image]
|
458 |
-
images = [self.clip_resize(x) for x in images]
|
459 |
-
images = [self.clip_center_crop(x) for x in images]
|
460 |
-
images = [x.to(self.device) for x in images]
|
461 |
-
if self.div_pixel:
|
462 |
-
images = [((x / 255.0) - self.pixel_mean) / self.pixel_std for x in images]
|
463 |
-
else:
|
464 |
-
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
|
465 |
-
|
466 |
-
# get image embedding
|
467 |
-
img_emb = self.clip_backbone.encode_image(images[0].unsqueeze(0))
|
468 |
-
img_emb = img_emb.view(-1, 1, img_emb.size(1))
|
469 |
-
img_emb = torch.sum(img_emb, dim=1) # ensemble different scales for each region
|
470 |
-
img_emb = F.normalize(img_emb, p=2.0, dim=1)
|
471 |
-
|
472 |
-
# cosine similarity as logits
|
473 |
-
all_scores = torch.mm(img_emb, clss_emb_all.T)
|
474 |
-
scores, pred_cls = torch.max(all_scores, dim=1) # Note: [0, #cls-1] representing the categories. The value #cls represents "background".
|
475 |
-
pred_cls = pred_cls.item()
|
476 |
-
if pred_cls == label:
|
477 |
-
correct += 1
|
478 |
-
else:
|
479 |
-
wrong += 1
|
480 |
-
|
481 |
-
print("\n\nGot correct {} and wrong {}. Accuracy is {} / {} = {}\n\n".format(correct,wrong,correct,correct+wrong,correct/(correct+wrong)))
|
482 |
-
return
|
483 |
-
|
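The CIFAR-10 sanity check above follows the standard CLIP zero-shot recipe: ensemble the prompt embeddings per class, L2-normalize both sides, and take the argmax of the cosine similarities. A condensed sketch of just that classification step (the encoder calls that produce the embeddings are assumed to happen elsewhere):
import torch
import torch.nn.functional as F

@torch.no_grad()
def zero_shot_classify(image_emb, text_embs_per_class):
    # image_emb: [1, D]; text_embs_per_class: [num_cls, num_prompts, D]
    cls_embs = F.normalize(text_embs_per_class.mean(dim=1), p=2.0, dim=1)  # prompt ensembling
    image_emb = F.normalize(image_emb, p=2.0, dim=1)
    scores = image_emb @ cls_embs.t()  # cosine similarity, shape [1, num_cls]
    return scores.argmax(dim=1).item()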
484 |
-
@META_ARCH_REGISTRY.register()
|
485 |
-
class CLIPFastRCNN(nn.Module):
|
486 |
-
"""
|
487 |
-
CLIP in Fast R-CNN format, where the cropping is conducted on feature maps instead of raw images.
|
488 |
-
It contains the following two components:
|
489 |
-
1. Localization module: a pretrained backbone + RPN (or an equivalent module) that outputs object proposals
|
490 |
-
2. Recognition branch: initialized from CLIP and able to recognize regions in a zero-shot manner (see the sketch below)
|
491 |
-
"""
|
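To illustrate the two components listed above, the inference-time data flow can be summarized with a simplified, hypothetical sketch; rpn, clip_visual, roi_pool and text_embs stand in for the modules and embeddings built in from_config and are not the actual attribute names.
import torch.nn.functional as F

def open_vocab_detect(image, rpn, clip_visual, roi_pool, text_embs):
    # 1. localization: a frozen, class-agnostic RPN proposes candidate boxes
    proposals = rpn(image)
    # 2. recognition: CLIP's visual encoder produces a feature map; per-box features
    #    are pooled and matched against text embeddings of the category names
    feature_map = clip_visual(image)
    region_feats = F.normalize(roi_pool(feature_map, proposals), dim=-1)   # [num_boxes, D]
    logits = region_feats @ F.normalize(text_embs, dim=-1).t()             # [num_boxes, num_classes]
    return proposals, logits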
492 |
-
@configurable
|
493 |
-
def __init__(
|
494 |
-
self,
|
495 |
-
*,
|
496 |
-
offline_backbone: Backbone,
|
497 |
-
backbone: Backbone,
|
498 |
-
backbone_type: str = "resnet",
|
499 |
-
text_backbone: Backbone,
|
500 |
-
offline_proposal_generator: nn.Module,
|
501 |
-
roi_heads: nn.Module,
|
502 |
-
pixel_mean: Tuple[float],
|
503 |
-
pixel_std: Tuple[float],
|
504 |
-
input_format: Optional[str] = None,
|
505 |
-
vis_period: int = 0,
|
506 |
-
clip_crop_region_type: str = 'GT',
|
507 |
-
use_clip_c4: bool = False,
|
508 |
-
use_clip_attpool: bool = False,
|
509 |
-
offline_input_format: Optional[str] = None,
|
510 |
-
offline_pixel_mean: Tuple[float],
|
511 |
-
offline_pixel_std: Tuple[float],
|
512 |
-
):
|
513 |
-
"""
|
514 |
-
Args:
|
515 |
-
backbone: a backbone module, must follow detectron2's backbone interface
|
516 |
-
proposal_generator: a module that generates proposals using backbone features
|
517 |
-
roi_heads: a ROI head that performs per-region computation
|
518 |
-
pixel_mean, pixel_std: list or tuple with #channels element, representing
|
519 |
-
the per-channel mean and std to be used to normalize the input image
|
520 |
-
input_format: describe the meaning of channels of input. Needed by visualization
|
521 |
-
vis_period: the period to run visualization. Set to 0 to disable.
|
522 |
-
"""
|
523 |
-
super().__init__()
|
524 |
-
self.offline_backbone = offline_backbone
|
525 |
-
self.backbone = backbone
|
526 |
-
self.backbone_type = backbone_type
|
527 |
-
self.offline_proposal_generator = offline_proposal_generator
|
528 |
-
self.roi_heads = roi_heads
|
529 |
-
self.lang_encoder = text_backbone
|
530 |
-
|
531 |
-
self.input_format = input_format
|
532 |
-
self.vis_period = vis_period
|
533 |
-
if vis_period > 0:
|
534 |
-
assert input_format is not None, "input_format is required for visualization!"
|
535 |
-
|
536 |
-
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
|
537 |
-
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
|
538 |
-
assert (
|
539 |
-
self.pixel_mean.shape == self.pixel_std.shape
|
540 |
-
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
|
541 |
-
if np.sum(pixel_mean) < 3.0: # convert pixel values to the range [0.0, 1.0] by dividing by 255.0
|
542 |
-
assert input_format == 'RGB'
|
543 |
-
self.div_pixel = True
|
544 |
-
else: # default setting
|
545 |
-
self.div_pixel = False
|
546 |
-
|
547 |
-
# input format, pixel mean and std for offline modules
|
548 |
-
if offline_input_format and offline_pixel_mean and offline_pixel_std:
|
549 |
-
self.offline_input_format = offline_input_format
|
550 |
-
self.register_buffer("offline_pixel_mean", torch.tensor(offline_pixel_mean).view(-1, 1, 1), False)
|
551 |
-
self.register_buffer("offline_pixel_std", torch.tensor(offline_pixel_std).view(-1, 1, 1), False)
|
552 |
-
if np.sum(offline_pixel_mean) < 3.0: # convert pixel values to the range [0.0, 1.0] by dividing by 255.0
|
553 |
-
assert offline_input_format == 'RGB'
|
554 |
-
self.offline_div_pixel = True
|
555 |
-
else: # default setting
|
556 |
-
self.offline_div_pixel = False
|
557 |
-
|
558 |
-
self.clip_crop_region_type = clip_crop_region_type
|
559 |
-
self.use_clip_c4 = use_clip_c4 # if True, use C4 mode where roi_head uses the last resnet layer from backbone
|
560 |
-
self.use_clip_attpool = use_clip_attpool # if True (C4+text_emb_as_classifier), use att_pool to replace default mean pool
|
561 |
-
|
562 |
-
|
563 |
-
@classmethod
|
564 |
-
def from_config(cls, cfg):
|
565 |
-
if cfg.MODEL.CLIP.CROP_REGION_TYPE == "RPN": # create isolated backbone & RPN
|
566 |
-
# create offline cfg for the pretrained backbone & RPN
|
567 |
-
from detectron2.config import get_cfg
|
568 |
-
offline_cfg = get_cfg()
|
569 |
-
offline_cfg.merge_from_file(cfg.MODEL.CLIP.OFFLINE_RPN_CONFIG)
|
570 |
-
if cfg.MODEL.CLIP.OFFLINE_RPN_LSJ_PRETRAINED: # large-scale jittering (LSJ) pretrained RPN
|
571 |
-
offline_cfg.MODEL.BACKBONE.FREEZE_AT = 0 # convert all frozen layers to "SyncBN"
|
572 |
-
offline_cfg.MODEL.RESNETS.NORM = "SyncBN" # 5 resnet layers
|
573 |
-
offline_cfg.MODEL.FPN.NORM = "SyncBN" # fpn layers
|
574 |
-
offline_cfg.MODEL.RPN.CONV_DIMS = [-1, -1] # rpn layers
|
575 |
-
if cfg.MODEL.CLIP.OFFLINE_RPN_NMS_THRESH:
|
576 |
-
offline_cfg.MODEL.RPN.NMS_THRESH = cfg.MODEL.CLIP.OFFLINE_RPN_NMS_THRESH # 0.9
|
577 |
-
offline_cfg.MODEL.RPN.POST_NMS_TOPK_TEST = cfg.MODEL.RPN.POST_NMS_TOPK_TEST # follow the main config
|
578 |
-
|
579 |
-
# create offline backbone and RPN
|
580 |
-
offline_backbone = build_backbone(offline_cfg) # build_resnet_fpn_backbone(cfg, ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)))
|
581 |
-
offline_rpn = build_proposal_generator(offline_cfg, offline_backbone.output_shape())
|
582 |
-
# convert to evaluation mode
|
583 |
-
for p in offline_backbone.parameters(): p.requires_grad = False
|
584 |
-
for p in offline_rpn.parameters(): p.requires_grad = False
|
585 |
-
offline_backbone.eval()
|
586 |
-
offline_rpn.eval()
|
587 |
-
elif cfg.MODEL.CLIP.CROP_REGION_TYPE == "GT":
|
588 |
-
offline_backbone = None
|
589 |
-
offline_rpn = None
|
590 |
-
offline_cfg = None
|
591 |
-
|
592 |
-
backbone = build_backbone(cfg)
|
593 |
-
text_backbone = build_clip_language_encoder(cfg)
|
594 |
-
|
595 |
-
backbone_type = "swin" if "swin" in cfg.MODEL.BACKBONE.NAME else "resnet"
|
596 |
-
|
597 |
-
if backbone_type == "swin":
|
598 |
-
roi_heads = build_roi_heads(cfg, backbone.image_encoder.output_shape())
|
599 |
-
else:
|
600 |
-
roi_heads = build_roi_heads(cfg, backbone.output_shape())
|
601 |
-
|
602 |
-
return {
|
603 |
-
"offline_backbone": offline_backbone,
|
604 |
-
"offline_proposal_generator": offline_rpn,
|
605 |
-
"backbone": backbone,
|
606 |
-
"backbone_type": backbone_type,
|
607 |
-
"text_backbone": text_backbone,
|
608 |
-
"roi_heads": roi_heads,
|
609 |
-
"input_format": cfg.INPUT.FORMAT,
|
610 |
-
"vis_period": cfg.VIS_PERIOD,
|
611 |
-
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
|
612 |
-
"pixel_std": cfg.MODEL.PIXEL_STD,
|
613 |
-
"clip_crop_region_type" : cfg.MODEL.CLIP.CROP_REGION_TYPE,
|
614 |
-
"use_clip_c4": 'FPN' not in cfg.MODEL.BACKBONE.NAME,
|
615 |
-
"use_clip_attpool": cfg.MODEL.ROI_HEADS.NAME in ['CLIPRes5ROIHeads', 'CLIPStandardROIHeads'] and cfg.MODEL.CLIP.USE_TEXT_EMB_CLASSIFIER,
|
616 |
-
"offline_input_format": offline_cfg.INPUT.FORMAT if offline_cfg else None,
|
617 |
-
"offline_pixel_mean": offline_cfg.MODEL.PIXEL_MEAN if offline_cfg else None,
|
618 |
-
"offline_pixel_std": offline_cfg.MODEL.PIXEL_STD if offline_cfg else None,
|
619 |
-
}
|
620 |
-
|
621 |
-
@property
|
622 |
-
def device(self):
|
623 |
-
return self.pixel_mean.device
|
624 |
-
|
625 |
-
def forward(self, queries, batched_inputs: List[Dict[str, torch.Tensor]]):
|
626 |
-
"""
|
627 |
-
Args:
|
628 |
-
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
|
629 |
-
Each item in the list contains the inputs for one image.
|
630 |
-
For now, each item in the list is a dict that contains:
|
631 |
-
|
632 |
-
* image: Tensor, image in (C, H, W) format.
|
633 |
-
* instances (optional): groundtruth :class:`Instances`
|
634 |
-
* proposals (optional): :class:`Instances`, precomputed proposals.
|
635 |
-
|
636 |
-
Other information that's included in the original dicts, such as:
|
637 |
-
|
638 |
-
* "height", "width" (int): the output resolution of the model, used in inference.
|
639 |
-
See :meth:`postprocess` for details.
|
640 |
-
|
641 |
-
Returns:
|
642 |
-
list[dict]:
|
643 |
-
Each dict is the output for one input image.
|
644 |
-
The dict contains one key "instances" whose value is a :class:`Instances`.
|
645 |
-
The :class:`Instances` object has the following keys:
|
646 |
-
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
|
647 |
-
"""
|
648 |
-
if not self.training:
|
649 |
-
return self.inference(queries, batched_inputs)
|
650 |
-
if "instances" in batched_inputs[0]:
|
651 |
-
gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
|
652 |
-
else:
|
653 |
-
gt_instances = None
|
654 |
-
|
655 |
-
# localization branch: offline modules to get the region proposals
|
656 |
-
with torch.no_grad():
|
657 |
-
if self.clip_crop_region_type == "GT": # from ground-truth
|
658 |
-
proposals = []
|
659 |
-
for r_i, b_input in enumerate(batched_inputs):
|
660 |
-
this_gt = copy.deepcopy(b_input["instances"]) # Instance
|
661 |
-
gt_boxes = this_gt._fields['gt_boxes'].to(self.device)
|
662 |
-
this_gt._fields = {'proposal_boxes': gt_boxes, 'objectness_logits': torch.ones(gt_boxes.tensor.size(0)).to(self.device)}
|
663 |
-
proposals.append(this_gt)
|
664 |
-
elif self.clip_crop_region_type == "RPN": # from the backbone & RPN of standard Mask-RCNN, trained on base classes
|
665 |
-
if self.offline_backbone.training or self.offline_proposal_generator.training: # was set to True in training script
|
666 |
-
self.offline_backbone.eval()
|
667 |
-
self.offline_proposal_generator.eval()
|
668 |
-
images = self.offline_preprocess_image(batched_inputs)
|
669 |
-
features = self.offline_backbone(images.tensor)
|
670 |
-
if self.offline_proposal_generator is not None:
|
671 |
-
proposals, _ = self.offline_proposal_generator(images, features, None)
|
672 |
-
|
673 |
-
# recognition branch: get 2D feature maps using the backbone of recognition branch
|
674 |
-
images = self.preprocess_image(batched_inputs)
|
675 |
-
features = self.backbone(images.tensor)
|
676 |
-
|
677 |
-
if self.backbone_type == "resnet":
|
678 |
-
head = self.backbone.layer4
|
679 |
-
elif self.backbone_type == "swin":
|
680 |
-
head = self.backbone.layers[-1]
|
681 |
-
|
682 |
-
# Given the proposals, crop region features from 2D image features and classify the regions
|
683 |
-
if self.use_clip_c4: # use C4 + resnet weights from CLIP
|
684 |
-
if self.use_clip_attpool: # use att_pool from CLIP to match dimension
|
685 |
-
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances, res5=head, attnpool=self.backbone.attnpool)
|
686 |
-
else: # use default mean pool
|
687 |
-
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances, res5=head)
|
688 |
-
else: # default setting
|
689 |
-
if self.use_clip_attpool: # use att_pool from CLIP to match dimension
|
690 |
-
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances, attnpool=self.backbone.bottom_up.attnpool)
|
691 |
-
else: # use default mean pool
|
692 |
-
_, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
|
693 |
-
if self.vis_period > 0:
|
694 |
-
storage = get_event_storage()
|
695 |
-
if storage.iter % self.vis_period == 0:
|
696 |
-
self.visualize_training(batched_inputs, proposals)
|
697 |
-
#visualize_proposals(batched_inputs, proposals, self.input_format)
|
698 |
-
|
699 |
-
losses = {}
|
700 |
-
losses.update(detector_losses)
|
701 |
-
return losses
|
702 |
-
|
703 |
-
def inference(
|
704 |
-
self,
|
705 |
-
queries,
|
706 |
-
batched_inputs: List[Dict[str, torch.Tensor]],
|
707 |
-
detected_instances: Optional[List[Instances]] = None,
|
708 |
-
do_postprocess: bool = True,
|
709 |
-
):
|
710 |
-
"""
|
711 |
-
Run inference on the given inputs.
|
712 |
-
|
713 |
-
Args:
|
714 |
-
batched_inputs (list[dict]): same as in :meth:`forward`
|
715 |
-
detected_instances (None or list[Instances]): if not None, it
|
716 |
-
contains an `Instances` object per image. The `Instances`
|
717 |
-
object contains "pred_boxes" and "pred_classes" which are
|
718 |
-
known boxes in the image.
|
719 |
-
The inference will then skip the detection of bounding boxes,
|
720 |
-
and only predict other per-ROI outputs.
|
721 |
-
do_postprocess (bool): whether to apply post-processing on the outputs.
|
722 |
-
|
723 |
-
Returns:
|
724 |
-
When do_postprocess=True, same as in :meth:`forward`.
|
725 |
-
Otherwise, a list[Instances] containing raw network outputs.
|
726 |
-
"""
|
727 |
-
assert not self.training
|
728 |
-
|
729 |
-
# localization branch: offline modules to get the region proposals
|
730 |
-
if self.clip_crop_region_type == "GT": # from ground-truth
|
731 |
-
proposals = []
|
732 |
-
for r_i, b_input in enumerate(batched_inputs):
|
733 |
-
this_gt = copy.deepcopy(b_input["instances"]) # Instance
|
734 |
-
gt_boxes = this_gt._fields['gt_boxes'].to(self.device)
|
735 |
-
this_gt._fields = {'proposal_boxes': gt_boxes} #, 'objectness_logits': None}
|
736 |
-
proposals.append(this_gt)
|
737 |
-
elif self.clip_crop_region_type == "RPN": # from the backbone & RPN of standard Mask-RCNN, trained on base classes
|
738 |
-
images = self.offline_preprocess_image(batched_inputs)
|
739 |
-
features = self.offline_backbone(images.tensor)
|
740 |
-
if detected_instances is None:
|
741 |
-
if self.offline_proposal_generator is not None:
|
742 |
-
proposals, _ = self.offline_proposal_generator(images, features, None)
|
743 |
-
|
744 |
-
# recognition branch: get 2D feature maps using the backbone of recognition branch
|
745 |
-
print(batched_inputs[0]['image'][0][:10, :10])
|
746 |
-
print(batched_inputs[0]['image'].shape)
|
747 |
-
images = self.preprocess_image(batched_inputs)
|
748 |
-
|
749 |
-
if self.backbone_type == "swin":
|
750 |
-
features = self.backbone.encode_image(images.tensor)
|
751 |
-
text_features = self.backbone.encode_text(queries)
|
752 |
-
else:
|
753 |
-
features = self.backbone(images.tensor)
|
754 |
-
token_embeddings = pre_tokenize([queries]).to(images.tensor.device)[0]
|
755 |
-
text_features = self.lang_encoder.encode_text(token_embeddings)
|
756 |
-
text_features = text_features.mean(0, keepdim=True)
|
757 |
-
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
|
758 |
-
|
759 |
-
if self.backbone_type == "resnet":
|
760 |
-
head = self.backbone.layer4
|
761 |
-
downsampler = None
|
762 |
-
norm = None
|
763 |
-
vision_projection = None
|
764 |
-
elif self.backbone_type == "swin":
|
765 |
-
downsampler = self.backbone.image_encoder.layers[-2].downsample
|
766 |
-
head = self.backbone.image_encoder.layers[-1]
|
767 |
-
norm = self.backbone.image_encoder.norm
|
768 |
-
vision_projection = self.backbone.image_projection
|
769 |
-
|
770 |
-
# Given the proposals, crop region features from 2D image features and classify the regions
|
771 |
-
if self.use_clip_c4: # use C4 + resnet weights from CLIP
|
772 |
-
if self.use_clip_attpool: # use att_pool from CLIP to match dimension
|
773 |
-
results, _ = self.roi_heads(images, features, proposals, text_features, None,
|
774 |
-
res5=head, ds=downsampler, norm=norm, vision_projection=vision_projection, attnpool=self.backbone.attnpool)
|
775 |
-
else: # use default mean pool
|
776 |
-
results, _ = self.roi_heads(images, features, proposals, text_features, None,
|
777 |
-
res5=head, ds=downsampler, norm=norm, vision_projection=vision_projection)
|
778 |
-
else: # default setting
|
779 |
-
if self.use_clip_attpool: # use att_pool from CLIP to match dimension
|
780 |
-
results, _ = self.roi_heads(images, features, proposals, text_features, None,
|
781 |
-
attnpool=self.backbone.bottom_up.attnpool)
|
782 |
-
else:
|
783 |
-
results, _ = self.roi_heads(images, features, proposals, text_features, None)
|
784 |
-
|
785 |
-
visualize_proposals(batched_inputs, proposals, self.input_format)
|
786 |
-
vis = visualize_results(batched_inputs, results, self.input_format)
|
787 |
-
return vis
|
788 |
-
|
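In the resnet path above, a free-form query is embedded by tokenizing several prompt templates, averaging the resulting sentence embeddings and L2-normalizing the mean; a stand-alone sketch of that step, where tokenizer and text_encoder mirror the pre_tokenize / encode_text calls and the prompt templates are an assumption:
import torch

@torch.no_grad()
def embed_query(query, tokenizer, text_encoder, device):
    templates = ["a photo of a {}.", "there is a {} in the scene."]  # illustrative prompts
    tokens = tokenizer([t.format(query) for t in templates]).to(device)   # [num_prompts, context_len]
    embs = text_encoder(tokens)                                           # [num_prompts, D]
    emb = embs.mean(dim=0, keepdim=True)                                  # ensemble the prompts
    return emb / emb.norm(dim=-1, keepdim=True)                           # unit norm for cosine matching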
789 |
-
def offline_preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
790 |
-
"""
|
791 |
-
Normalize, pad and batch the input images. Use detectron2 default processing (pixel mean & std).
|
792 |
-
Note: Due to FPN size_divisibility, images are padded by right/bottom border. So FPN is consistent with C4 and GT boxes.
|
793 |
-
"""
|
794 |
-
images = [x["image"].to(self.device) for x in batched_inputs]
|
795 |
-
if (self.input_format == 'RGB' and self.offline_input_format == 'BGR') or \
|
796 |
-
(self.input_format == 'BGR' and self.offline_input_format == 'RGB'): # the input image follows the main config format ('RGB' or 'BGR')
|
797 |
-
images = [x[[2,1,0],:,:] for x in images]
|
798 |
-
if self.offline_div_pixel:
|
799 |
-
images = [((x / 255.0) - self.offline_pixel_mean) / self.offline_pixel_std for x in images]
|
800 |
-
else:
|
801 |
-
images = [(x - self.offline_pixel_mean) / self.offline_pixel_std for x in images]
|
802 |
-
images = ImageList.from_tensors(images, self.offline_backbone.size_divisibility)
|
803 |
-
return images
|
804 |
-
|
805 |
-
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
806 |
-
"""
|
807 |
-
Normalize, pad and batch the input images. Use CLIP default processing (pixel mean & std).
|
808 |
-
Note: Due to FPN size_divisibility, images are padded by right/bottom border. So FPN is consistent with C4 and GT boxes.
|
809 |
-
"""
|
810 |
-
images = [x["image"].to(self.device) for x in batched_inputs]
|
811 |
-
if self.div_pixel:
|
812 |
-
images = [((x / 255.0) - self.pixel_mean) / self.pixel_std for x in images]
|
813 |
-
else:
|
814 |
-
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
|
815 |
-
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
|
816 |
-
return images
|
817 |
-
|
818 |
-
@staticmethod
|
819 |
-
def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]]):
|
820 |
-
"""
|
821 |
-
Rescale the output instances to the target size.
|
822 |
-
"""
|
823 |
-
# note: private function; subject to changes
|
824 |
-
processed_results = []
|
825 |
-
for results_per_image, input_per_image in zip(
|
826 |
-
instances, batched_inputs):
|
827 |
-
height = input_per_image["height"] # original image size, before resizing
|
828 |
-
width = input_per_image["width"] # original image size, before resizing
|
829 |
-
r = detector_postprocess(results_per_image, height, width)
|
830 |
-
processed_results.append({"instances": r})
|
831 |
-
return processed_results
|
832 |
-
|
833 |
-
@META_ARCH_REGISTRY.register()
|
834 |
-
class PretrainFastRCNN(nn.Module):
|
835 |
-
"""
|
836 |
-
Open-vocabulary region representation via vision-language pretraining from image-text pairs
|
837 |
-
1. image-text level matching: weakly supervised grounding with contrastive learning over region-token representations (see the loss sketch below)
|
838 |
-
2. region-token level matching: train the student with pseudo text / concept labels provided by a teacher model
|
839 |
-
"""
|
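The image-text level objective implemented in image_text_matching below is the usual symmetric contrastive (InfoNCE / CLIP-style) loss over a similarity matrix whose diagonal holds the matched pairs; a minimal sketch with an illustrative temperature value:
import torch
import torch.nn.functional as F

def clip_contrastive_loss(img_embs, txt_embs, temperature=0.01):
    # img_embs, txt_embs: [B, D], already L2-normalized; row i matches column i
    logits = img_embs @ txt_embs.t() / temperature      # [B, B]
    target = torch.arange(logits.size(0), device=logits.device)
    row_loss = F.cross_entropy(logits, target)          # image -> text direction
    col_loss = F.cross_entropy(logits.t(), target)      # text -> image direction
    return (row_loss + col_loss) / 2.0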
840 |
-
@configurable
|
841 |
-
def __init__(
|
842 |
-
self,
|
843 |
-
*,
|
844 |
-
offline_backbone: Backbone,
|
845 |
-
backbone: Backbone,
|
846 |
-
offline_proposal_generator: nn.Module,
|
847 |
-
roi_heads: nn.Module,
|
848 |
-
teacher_backbone: nn.Module,
|
849 |
-
teacher_roi_heads: nn.Module,
|
850 |
-
pixel_mean: Tuple[float],
|
851 |
-
pixel_std: Tuple[float],
|
852 |
-
input_format: Optional[str] = None,
|
853 |
-
vis_period: int = 0,
|
854 |
-
clip_crop_region_type: str = 'GT',
|
855 |
-
use_clip_c4: bool = False,
|
856 |
-
use_clip_attpool: bool = False,
|
857 |
-
offline_input_format: Optional[str] = None,
|
858 |
-
offline_pixel_mean: Tuple[float],
|
859 |
-
offline_pixel_std: Tuple[float],
|
860 |
-
language_encoder: nn.Module,
|
861 |
-
matching_temp: Optional[float] = None,
|
862 |
-
num_regions_per_img: int = 0,
|
863 |
-
img_txt_level: Optional[tuple] = None,
|
864 |
-
gather_gpus: bool = False,
|
865 |
-
grid_regions: bool = False,
|
866 |
-
concept_emb: Optional[tuple] = None,
|
867 |
-
):
|
868 |
-
"""
|
869 |
-
Args:
|
870 |
-
backbone: a backbone module, must follow detectron2's backbone interface
|
871 |
-
proposal_generator: a module that generates proposals using backbone features
|
872 |
-
roi_heads: a ROI head that performs per-region computation
|
873 |
-
pixel_mean, pixel_std: list or tuple with #channels element, representing
|
874 |
-
the per-channel mean and std to be used to normalize the input image
|
875 |
-
input_format: describe the meaning of channels of input. Needed by visualization
|
876 |
-
vis_period: the period to run visualization. Set to 0 to disable.
|
877 |
-
"""
|
878 |
-
super().__init__()
|
879 |
-
self.offline_backbone = offline_backbone
|
880 |
-
self.backbone = backbone
|
881 |
-
self.offline_proposal_generator = offline_proposal_generator
|
882 |
-
self.roi_heads = roi_heads
|
883 |
-
|
884 |
-
self.input_format = input_format
|
885 |
-
self.vis_period = vis_period
|
886 |
-
if vis_period > 0:
|
887 |
-
assert input_format is not None, "input_format is required for visualization!"
|
888 |
-
|
889 |
-
self.register_buffer("pixel_mean", torch.tensor(pixel_mean).view(-1, 1, 1), False)
|
890 |
-
self.register_buffer("pixel_std", torch.tensor(pixel_std).view(-1, 1, 1), False)
|
891 |
-
assert (
|
892 |
-
self.pixel_mean.shape == self.pixel_std.shape
|
893 |
-
), f"{self.pixel_mean} and {self.pixel_std} have different shapes!"
|
894 |
-
if np.sum(pixel_mean) < 3.0: # convert pixel values to the range [0.0, 1.0] by dividing by 255.0
|
895 |
-
assert input_format == 'RGB'
|
896 |
-
self.div_pixel = True
|
897 |
-
else: # default setting
|
898 |
-
self.div_pixel = False
|
899 |
-
|
900 |
-
# input format, pixel mean and std for offline modules
|
901 |
-
if offline_input_format and offline_pixel_mean and offline_pixel_std:
|
902 |
-
self.offline_input_format = offline_input_format
|
903 |
-
self.register_buffer("offline_pixel_mean", torch.tensor(offline_pixel_mean).view(-1, 1, 1), False)
|
904 |
-
self.register_buffer("offline_pixel_std", torch.tensor(offline_pixel_std).view(-1, 1, 1), False)
|
905 |
-
if np.sum(offline_pixel_mean) < 3.0: # convert pixel values to the range [0.0, 1.0] by dividing by 255.0
|
906 |
-
assert offline_input_format == 'RGB'
|
907 |
-
self.offline_div_pixel = True
|
908 |
-
else: # default setting
|
909 |
-
self.offline_div_pixel = False
|
910 |
-
|
911 |
-
self.clip_crop_region_type = clip_crop_region_type
|
912 |
-
self.use_clip_c4 = use_clip_c4 # if True, use C4 mode where roi_head uses the last resnet layer from backbone
|
913 |
-
self.use_clip_attpool = use_clip_attpool # if True (C4+text_emb_as_classifier), use att_pool to replace default mean pool
|
914 |
-
|
915 |
-
# image-text level pretraining
|
916 |
-
self.img_txt_level = img_txt_level[0]
|
917 |
-
self.only_eot = img_txt_level[1]
|
918 |
-
if self.img_txt_level:
|
919 |
-
self.lang_encoder = language_encoder
|
920 |
-
for p in self.lang_encoder.parameters(): # freeze language encoder
|
921 |
-
p.requires_grad = False
|
922 |
-
if matching_temp > 0.0: # fixed temp
|
923 |
-
self.matching_temp = matching_temp
|
924 |
-
else: # learnable temp
|
925 |
-
self.matching_temp = nn.Parameter(torch.ones([]) * 4.6052) # nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
|
926 |
-
self.context_length = 77 # defined in clip_img_txt_pair_tsv class
|
927 |
-
self.num_regions_per_img = num_regions_per_img
|
928 |
-
self.gather_gpus = gather_gpus
|
929 |
-
self.grid_regions = grid_regions
|
930 |
-
|
931 |
-
# region-token level pretraining
|
932 |
-
if concept_emb[0]:
|
933 |
-
self.register_buffer("concept_emb", torch.load(concept_emb[0]), False) # [#concepts, 1024]
|
934 |
-
self.concept_thres = concept_emb[1]
|
935 |
-
self.teacher_backbone = teacher_backbone # None
|
936 |
-
# when resume, create teacher model in advance to load ckpt
|
937 |
-
# self.teacher_backbone = copy.deepcopy(self.backbone)
|
938 |
-
# # # oai_clip = torch.load("/mnt/output_storage/trained_models/oai_clip_weights/RN50_OAI_CLIP.pth") #("/home/v-yiwuzhong/projects/azureblobs/vyiwuzhong_phillytools/trained_models/oai_clip_weights/RN50_OAI_CLIP.pth")
|
939 |
-
# # # oai_clip_visual = {}
|
940 |
-
# # # for key in oai_clip['model']:
|
941 |
-
# # # if 'visual' in key and 'num_batches_tracked' not in key:
|
942 |
-
# # # oai_clip_visual[key.replace('visual.','')] = oai_clip['model'][key]
|
943 |
-
# # # self.teacher_backbone.load_state_dict(oai_clip_visual)
|
944 |
-
for p in self.teacher_backbone.parameters(): # freeze visual encoder of teacher model
|
945 |
-
p.requires_grad = False
|
946 |
-
if concept_emb[2] is None: # teacher model uses the same concept embedding as student model
|
947 |
-
self.register_buffer("teacher_concept_emb", torch.load(concept_emb[0]), False)
|
948 |
-
else: # teacher model uses a separate concept embedding
|
949 |
-
self.register_buffer("teacher_concept_emb", torch.load(concept_emb[2]), False)
|
950 |
-
self.teacher_roi_heads = teacher_roi_heads
|
951 |
-
else:
|
952 |
-
self.concept_emb = None
|
953 |
-
|
954 |
-
@classmethod
|
955 |
-
def from_config(cls, cfg):
|
956 |
-
if cfg.MODEL.CLIP.CROP_REGION_TYPE == "RPN": # create isolated backbone & RPN
|
957 |
-
# create offline cfg for the pretrained backbone & RPN
|
958 |
-
from detectron2.config import get_cfg
|
959 |
-
offline_cfg = get_cfg()
|
960 |
-
offline_cfg.merge_from_file(cfg.MODEL.CLIP.OFFLINE_RPN_CONFIG)
|
961 |
-
if cfg.MODEL.CLIP.OFFLINE_RPN_LSJ_PRETRAINED: # large-scale jittering (LSJ) pretrained RPN
|
962 |
-
offline_cfg.MODEL.BACKBONE.FREEZE_AT = 0 # convert all frozen layers to "SyncBN"
|
963 |
-
offline_cfg.MODEL.RESNETS.NORM = "SyncBN" # 5 resnet layers
|
964 |
-
offline_cfg.MODEL.FPN.NORM = "SyncBN" # fpn layers
|
965 |
-
offline_cfg.MODEL.RPN.CONV_DIMS = [-1, -1] # rpn layers
|
966 |
-
if cfg.MODEL.CLIP.PRETRAIN_RPN_REGIONS:
|
967 |
-
offline_cfg.MODEL.RPN.POST_NMS_TOPK_TEST = cfg.MODEL.CLIP.PRETRAIN_RPN_REGIONS
|
968 |
-
if cfg.MODEL.CLIP.OFFLINE_RPN_NMS_THRESH:
|
969 |
-
offline_cfg.MODEL.RPN.NMS_THRESH = cfg.MODEL.CLIP.OFFLINE_RPN_NMS_THRESH # 0.9
|
970 |
-
# offline_cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.6
|
971 |
-
# print("\n\n Set offline RPN.NMS_THRESH to {} and ROI_HEADS.NMS_THRESH_TEST to {}.\n\n".format(offline_cfg.MODEL.RPN.NMS_THRESH, offline_cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST))
|
972 |
-
# create offline backbone and RPN
|
973 |
-
offline_backbone = build_backbone(offline_cfg) # build_resnet_fpn_backbone(cfg, ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)))
|
974 |
-
offline_rpn = build_proposal_generator(offline_cfg, offline_backbone.output_shape())
|
975 |
-
# convert to evaluation mode
|
976 |
-
for p in offline_backbone.parameters(): p.requires_grad = False
|
977 |
-
for p in offline_rpn.parameters(): p.requires_grad = False
|
978 |
-
offline_backbone.eval()
|
979 |
-
offline_rpn.eval()
|
980 |
-
elif cfg.MODEL.CLIP.CROP_REGION_TYPE in ["GLOBAL", "GRID", "RANDOM"]:
|
981 |
-
offline_backbone = None
|
982 |
-
offline_rpn = None
|
983 |
-
offline_cfg = None
|
984 |
-
# visual encoder and roi_heads of student model
|
985 |
-
backbone = build_backbone(cfg)
|
986 |
-
|
987 |
-
if "swin" in cfg.MODEL.BACKBONE.NAME:
|
988 |
-
roi_heads = build_roi_heads(cfg, backbone.image_encoder.output_shape())
|
989 |
-
else:
|
990 |
-
roi_heads = build_roi_heads(cfg, backbone.output_shape())
|
991 |
-
|
992 |
-
# language encoder of student model
|
993 |
-
language_encoder = build_clip_language_encoder(cfg)
|
994 |
-
# visual encoder of teacher model
|
995 |
-
teacher_cfg = copy.deepcopy(cfg)
|
996 |
-
teacher_cfg.defrost()
|
997 |
-
teacher_cfg.MODEL.RESNETS.DEPTH = teacher_cfg.MODEL.CLIP.TEACHER_RESNETS_DEPTH
|
998 |
-
teacher_backbone = build_backbone(teacher_cfg)
|
999 |
-
teacher_cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = teacher_cfg.MODEL.CLIP.TEACHER_POOLER_RESOLUTION
|
1000 |
-
teacher_roi_heads = build_roi_heads(teacher_cfg, teacher_backbone.output_shape())
|
1001 |
-
return {
|
1002 |
-
"offline_backbone": offline_backbone,
|
1003 |
-
"offline_proposal_generator": offline_rpn,
|
1004 |
-
"backbone": backbone,
|
1005 |
-
"roi_heads": roi_heads,
|
1006 |
-
"teacher_backbone": teacher_backbone,
|
1007 |
-
"teacher_roi_heads": teacher_roi_heads,
|
1008 |
-
"input_format": cfg.INPUT.FORMAT,
|
1009 |
-
"vis_period": cfg.VIS_PERIOD,
|
1010 |
-
"pixel_mean": cfg.MODEL.PIXEL_MEAN,
|
1011 |
-
"pixel_std": cfg.MODEL.PIXEL_STD,
|
1012 |
-
"clip_crop_region_type" : cfg.MODEL.CLIP.CROP_REGION_TYPE,
|
1013 |
-
"use_clip_c4": 'FPN' not in cfg.MODEL.BACKBONE.NAME,
|
1014 |
-
"use_clip_attpool": cfg.MODEL.ROI_HEADS.NAME == 'PretrainRes5ROIHeads',
|
1015 |
-
"offline_input_format": offline_cfg.INPUT.FORMAT if offline_cfg else None,
|
1016 |
-
"offline_pixel_mean": offline_cfg.MODEL.PIXEL_MEAN if offline_cfg else None,
|
1017 |
-
"offline_pixel_std": offline_cfg.MODEL.PIXEL_STD if offline_cfg else None,
|
1018 |
-
"language_encoder": language_encoder,
|
1019 |
-
"matching_temp": cfg.MODEL.CLIP.CLSS_TEMP,
|
1020 |
-
"num_regions_per_img": cfg.MODEL.CLIP.PRETRAIN_SAMPLE_REGIONS,
|
1021 |
-
"img_txt_level": (cfg.MODEL.CLIP.PRETRAIN_IMG_TXT_LEVEL, cfg.MODEL.CLIP.PRETRAIN_ONLY_EOT),
|
1022 |
-
"gather_gpus": cfg.MODEL.CLIP.GATHER_GPUS,
|
1023 |
-
"grid_regions": cfg.MODEL.CLIP.GRID_REGIONS,
|
1024 |
-
"concept_emb": (cfg.MODEL.CLIP.CONCEPT_POOL_EMB, cfg.MODEL.CLIP.CONCEPT_THRES, cfg.MODEL.CLIP.TEACHER_CONCEPT_POOL_EMB),
|
1025 |
-
}
|
1026 |
-
|
1027 |
-
@property
|
1028 |
-
def device(self):
|
1029 |
-
return self.pixel_mean.device
|
1030 |
-
|
1031 |
-
def forward(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
1032 |
-
"""
|
1033 |
-
Args:
|
1034 |
-
batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
|
1035 |
-
Each item in the list contains the inputs for one image.
|
1036 |
-
For now, each item in the list is a dict that contains:
|
1037 |
-
|
1038 |
-
* image: Tensor, image in (C, H, W) format.
|
1039 |
-
* instances (optional): groundtruth :class:`Instances`
|
1040 |
-
* proposals (optional): :class:`Instances`, precomputed proposals.
|
1041 |
-
|
1042 |
-
Other information that's included in the original dicts, such as:
|
1043 |
-
|
1044 |
-
* "height", "width" (int): the output resolution of the model, used in inference.
|
1045 |
-
See :meth:`postprocess` for details.
|
1046 |
-
|
1047 |
-
Returns:
|
1048 |
-
list[dict]:
|
1049 |
-
Each dict is the output for one input image.
|
1050 |
-
The dict contains one key "instances" whose value is a :class:`Instances`.
|
1051 |
-
The :class:`Instances` object has the following keys:
|
1052 |
-
"pred_boxes", "pred_classes", "scores", "pred_masks", "pred_keypoints"
|
1053 |
-
"""
|
1054 |
-
if not self.training:
|
1055 |
-
return self.inference(batched_inputs)
|
1056 |
-
if self.concept_emb is not None and self.teacher_backbone is None: # create a teacher model from an initialized student model; if resume, simply comment out this section
|
1057 |
-
self.teacher_backbone = copy.deepcopy(self.backbone)
|
1058 |
-
for p in self.teacher_backbone.parameters(): # freeze visual encoder of teacher model
|
1059 |
-
p.requires_grad = False
|
1060 |
-
gt_instances = None
|
1061 |
-
losses = {}
|
1062 |
-
|
1063 |
-
# localization branch: offline modules to get the region proposals
|
1064 |
-
proposals = self.get_region_proposals(batched_inputs)
|
1065 |
-
global_proposals = self.create_global_proposals(batched_inputs)
|
1066 |
-
# for prop, g_prop in zip(proposals, global_proposals): # append global proposal into each image
|
1067 |
-
# prop.proposal_boxes.tensor = torch.cat((prop.proposal_boxes.tensor, g_prop.tensor), dim=0)
|
1068 |
-
|
1069 |
-
# recognition branch: get 2D feature maps using the backbone of recognition branch
|
1070 |
-
images = self.preprocess_image(batched_inputs)
|
1071 |
-
features = self.backbone(images.tensor)
|
1072 |
-
region_feats = self.get_region_features(images, features, proposals, gt_instances)
|
1073 |
-
global_feats = self.get_region_features(images, features, global_proposals, gt_instances)
|
1074 |
-
|
1075 |
-
# image-text level matching
|
1076 |
-
if self.img_txt_level:
|
1077 |
-
self.image_text_matching(batched_inputs, proposals, region_feats, losses, global_feats=global_feats, only_global=True)
|
1078 |
-
|
1079 |
-
# region-phrase level matching
|
1080 |
-
if len(batched_inputs[0]) > 6: # controlled by dataset loading
|
1081 |
-
phrase_text_embs = self.encode_phrase_text(batched_inputs)
|
1082 |
-
else:
|
1083 |
-
phrase_text_embs = None
|
1084 |
-
|
1085 |
-
# region-concept level matching
|
1086 |
-
if self.concept_emb is not None:
|
1087 |
-
self.region_concept_matching(images, proposals, gt_instances, region_feats, losses, phrase_embs=phrase_text_embs)
|
1088 |
-
|
1089 |
-
return losses
|
1090 |
-
|
1091 |
-
def encode_phrase_text(self, batched_inputs):
|
1092 |
-
text = [x[6].view(-1,self.context_length).to(self.device) for i, x in enumerate(batched_inputs)]
|
1093 |
-
text = torch.cat(text, dim=0)
|
1094 |
-
text_embs = self.lang_encoder.encode_text(text, only_eot=True) # [#phrases, transformer.width]
|
1095 |
-
return text_embs
|
1096 |
-
|
1097 |
-
def region_concept_matching(self, images, proposals, gt_instances, region_feats, losses, phrase_embs=None):
|
1098 |
-
use_distill = True
|
1099 |
-
use_contrastive = True
|
1100 |
-
# get pseudo concept labels from the teacher model
|
1101 |
-
concept_scores, target_inds, keep_regions, target_embs, label_mtx, phrase_label_mtx, phrase_target_regions \
|
1102 |
-
= self.get_psuedo_concept_labels(images, proposals, gt_instances, phrase_embs=phrase_embs)
|
1103 |
-
|
1104 |
-
# prepare region features for the kept regions
|
1105 |
-
keep_region_feats = region_feats[keep_regions]
|
1106 |
-
keep_region_feats = keep_region_feats / keep_region_feats.norm(dim=-1, keepdim=True)
|
1107 |
-
|
1108 |
-
if use_distill:
|
1109 |
-
# distillation learning: learns from the predictions of teacher model
|
1110 |
-
concept_emb = self.concept_emb / self.concept_emb.norm(dim=-1, keepdim=True)
|
1111 |
-
cls_scores = keep_region_feats @ concept_emb.t() # [#kept_regions, #concepts]
|
1112 |
-
if isinstance(self.matching_temp, float): # Typical good values are 100.0 for euclidean, 10.0 for dot, 0.01 for cosine
|
1113 |
-
cls_scores_temp = cls_scores / self.matching_temp
|
1114 |
-
else:
|
1115 |
-
cls_scores_temp = cls_scores * self.matching_temp.exp()
|
1116 |
-
|
1117 |
-
# loss weights
|
1118 |
-
#rpn_weights = torch.cat([torch.sigmoid(p.objectness_logits) for p in proposals])[keep_regions]
|
1119 |
-
#focal_weights = self.focal_scaling(cls_scores_temp, target_inds)
|
1120 |
-
|
1121 |
-
# calculate loss
|
1122 |
-
cls_loss = F.kl_div(F.softmax(cls_scores_temp, dim=1).log(), concept_scores, reduction='batchmean') # input is log-probabilities, target is probabilities
|
1123 |
-
#cls_loss = SoftTargetCrossEntropy()(cls_scores_temp, concept_scores)
|
1124 |
-
#cls_loss = F.cross_entropy(cls_scores_temp, target_inds)
|
1125 |
-
#cls_loss = (F.cross_entropy(cls_scores_temp, target_inds, reduction="none") * focal_weights).mean()
|
1126 |
-
losses.update({"loss_region_distill": cls_loss}) # * 0.8})
|
1127 |
-
|
1128 |
-
if use_contrastive:
|
1129 |
-
# contrastive learning: matching student visual features with target teacher concept embs
|
1130 |
-
target_embs = target_embs / target_embs.norm(dim=-1, keepdim=True)
|
1131 |
-
match_scores = keep_region_feats @ target_embs.t() # [#kept_regions, #kept_regions]
|
1132 |
-
if isinstance(self.matching_temp, float): # Typical good values are 100.0 for euclidean, 10.0 for dot, 0.01 for cosine
|
1133 |
-
match_scores_temp = match_scores / self.matching_temp
|
1134 |
-
else:
|
1135 |
-
match_scores_temp = match_scores * self.matching_temp.exp()
|
1136 |
-
|
1137 |
-
# loss weights
|
1138 |
-
#rpn_weights = torch.cat([torch.sigmoid(p.objectness_logits) for p in proposals])[keep_regions]
|
1139 |
-
#focal_weights = (1 - torch.sigmoid(torch.diag(match_scores_temp))) ** 0.8 # 1.0 # 2.0 #
|
1140 |
-
|
1141 |
-
# calculate loss given matching scores and label matrix
|
1142 |
-
contrastive_loss = MILCrossEntropy()(match_scores_temp, label_mtx, weights=None, avg_positives=False) # SoftTargetCrossEntropy()(match_scores_temp, label_mtx)
|
1143 |
-
#contrastive_loss = (MILCrossEntropy()(match_scores, label_mtx) + MILCrossEntropy()(match_scores.t(), label_mtx)) / 2.0
|
1144 |
-
losses.update({"loss_concept_contrastive": contrastive_loss})
|
1145 |
-
|
1146 |
-
if phrase_embs is not None:
|
1147 |
-
phrase_embs = phrase_embs / phrase_embs.norm(dim=-1, keepdim=True)
|
1148 |
-
phrase_scores = phrase_embs @ phrase_target_regions.t()
|
1149 |
-
if isinstance(self.matching_temp, float): # Typical good values are 100.0 for euclidean, 10.0 for dot, 0.01 for cosine
|
1150 |
-
phrase_scores_temp = phrase_scores / self.matching_temp
|
1151 |
-
else:
|
1152 |
-
phrase_scores_temp = phrase_scores * self.matching_temp.exp()
|
1153 |
-
contrastive_loss = MILCrossEntropy()(phrase_scores_temp, phrase_label_mtx, weights=None, avg_positives=False)
|
1154 |
-
#contrastive_loss = SoftTargetCrossEntropy()(phrase_scores_temp, phrase_label_mtx)
|
1155 |
-
losses.update({"loss_phrase_contrastive": contrastive_loss})
|
1156 |
-
|
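The distillation branch above reduces to a temperature-scaled KL divergence between the student's concept distribution and the teacher's soft labels; a compact sketch of just that term, using log_softmax, which is numerically equivalent to the softmax(...).log() call above:
import torch.nn.functional as F

def region_distill_loss(student_scores, teacher_probs, temperature):
    # student_scores: raw region-vs-concept similarities [R, C]; teacher_probs: soft labels [R, C]
    log_p = F.log_softmax(student_scores / temperature, dim=1)
    # kl_div expects log-probabilities as input and probabilities as target
    return F.kl_div(log_p, teacher_probs, reduction='batchmean')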
1157 |
-
def image_text_matching(self, batched_inputs, proposals, region_feats, losses, global_feats=None, only_global=False):
|
1158 |
-
# encode text
|
1159 |
-
num_cap = int(batched_inputs[0][1].size(0) / self.context_length)
|
1160 |
-
if num_cap == 1: # one caption per image
|
1161 |
-
text = [x[1].view(1,-1).to(self.device) for x in batched_inputs]
|
1162 |
-
else: # multiple captions per image, so randomly pick one
|
1163 |
-
rand_ind = [randint(0, num_cap-1) for _ in range(len(batched_inputs))]
|
1164 |
-
text = [x[1].view(-1,self.context_length)[rand_ind[i]:rand_ind[i]+1].to(self.device) for i, x in enumerate(batched_inputs)]
|
1165 |
-
text = torch.cat(text, dim=0)
|
1166 |
-
text_embs = self.lang_encoder.encode_text(text, only_eot=self.only_eot) # [img_batch, n_ctx, transformer.width] or [img_batch, transformer.width]
|
1167 |
-
eot_pos = text.argmax(dim=-1)
|
1168 |
-
|
1169 |
-
# prepare region features and text embeddings
|
1170 |
-
if isinstance(proposals[0], Boxes):
|
1171 |
-
num_bbs = [len(prop) for prop in proposals]
|
1172 |
-
else:
|
1173 |
-
num_bbs = [len(prop.proposal_boxes) for prop in proposals]
|
1174 |
-
if global_feats is not None and only_global: # only global feature
|
1175 |
-
assert self.only_eot
|
1176 |
-
region_feats = global_feats
|
1177 |
-
region_feats = region_feats / region_feats.norm(dim=-1, keepdim=True)
|
1178 |
-
text_embs = text_embs / text_embs.norm(dim=-1, keepdim=True)
|
1179 |
-
num_bbs = [1 for _ in num_bbs]
|
1180 |
-
elif global_feats is not None and not only_global: # combine both global and region features
|
1181 |
-
assert self.only_eot
|
1182 |
-
keep_num = 20
|
1183 |
-
region_feats = region_feats.split(num_bbs)
|
1184 |
-
region_feats = [torch.mean(rg_f, dim=0, keepdim=True) for rg_f in region_feats]
|
1185 |
-
region_g_feats = [torch.cat((r_f[:keep_num], global_feats[i:i+1]), dim=0) for i, r_f in enumerate(region_feats)]
|
1186 |
-
region_g_feats = [torch.mean(rg_f, dim=0, keepdim=True) for rg_f in region_g_feats]
|
1187 |
-
region_g_feats = [rg_f / rg_f.norm(dim=-1, keepdim=True) for rg_f in region_g_feats]
|
1188 |
-
region_feats = torch.cat(region_g_feats)
|
1189 |
-
text_embs = text_embs / text_embs.norm(dim=-1, keepdim=True)
|
1190 |
-
num_bbs = [1 for _ in num_bbs]
|
1191 |
-
else: # only region features
|
1192 |
-
num_bbs = torch.tensor(num_bbs).long().to(self.device)
|
1193 |
-
|
1194 |
-
region_feats_full, min_bs = gather_tensors(region_feats) if self.gather_gpus else (region_feats, None) # gather across GPUs
|
1195 |
-
text_embs_full, min_bs = gather_tensors(text_embs) if self.gather_gpus else (text_embs, None) # gather across GPUs
|
1196 |
-
|
1197 |
-
# matching visual features with text embs
|
1198 |
-
match_scores = region_feats_full @ text_embs_full.view(-1, text_embs_full.size(-1)).t() # [#regions, img_batch * n_ctx]
|
1199 |
-
if global_feats is not None: # only global feature or combine both global and region features
|
1200 |
-
img_b = int(region_feats_full.size(0))
|
1201 |
-
pooled_score = match_scores
|
1202 |
-
else: # only region features
|
1203 |
-
eot_pos_full, min_bs = gather_tensors(eot_pos) if self.gather_gpus else (eot_pos, None) # gather across GPUs
|
1204 |
-
num_bbs_full, min_bs = gather_tensors(num_bbs) if self.gather_gpus else (num_bbs, None) # gather across GPUs
|
1205 |
-
pooled_score = []
|
1206 |
-
token_b = self.context_length
|
1207 |
-
# region_b = self.num_regions_per_img if global_feats is None else 1
|
1208 |
-
# img_b = int(region_feats_full.size(0) / region_b)
|
1209 |
-
img_b = num_bbs_full.size(0)
|
1210 |
-
rb_start = 0 # the starting index of regions
|
1211 |
-
for i in range(img_b): # for each image
|
1212 |
-
region_b = num_bbs_full[i].item()
|
1213 |
-
for j in range(img_b): # for each text
|
1214 |
-
if self.only_eot: # sentence level embs
|
1215 |
-
# max pool over regions
|
1216 |
-
this_s = torch.max(match_scores[rb_start:(rb_start+region_b), j:(j+1)], dim=0)[0]
|
1217 |
-
else: # token level embs
|
1218 |
-
# 3. softmax over regions as soft attention, then multiply attention with original logits, finally sum over matrix and divided by #tokens
|
1219 |
-
# this_matrix = match_scores[rb_start:(rb_start+region_b), j*token_b:(j*token_b+eot_pos_full[j]+1)]
|
1220 |
-
# this_att = F.softmax(this_matrix, dim=0)
|
1221 |
-
# this_s = torch.sum(this_matrix * this_att) / (eot_pos_full[j]+1)
|
1222 |
-
# 2. max pool over regions, and then avg over text tokens
|
1223 |
-
# this_s = torch.sum(torch.max(match_scores[rb_start:(rb_start+region_b), j*token_b:(j*token_b+eot_pos_full[j]+1)], dim=0)[0]) / (eot_pos_full[j]+1)
|
1224 |
-
# 1. max pool over regions, and then sum over text tokens
|
1225 |
-
this_s = torch.sum(torch.max(match_scores[rb_start:(rb_start+region_b), j*token_b:(j*token_b+eot_pos_full[j]+1)], dim=0)[0])
|
1226 |
-
pooled_score.append(this_s.view(1,1))
|
1227 |
-
rb_start += region_b
|
1228 |
-
assert rb_start == match_scores.size(0)
|
1229 |
-
pooled_score = torch.cat(pooled_score).view(img_b, img_b) # diagonal elements are positive pairs and the others are negative pairs
|
1230 |
-
|
1231 |
-
if isinstance(self.matching_temp,float): # Typical good values are 100.0 for euclidean, 10.0 for dot, 0.01 for cosine
|
1232 |
-
pooled_score = pooled_score / self.matching_temp
|
1233 |
-
else:
|
1234 |
-
pooled_score = pooled_score * self.matching_temp.exp()
|
1235 |
-
contrast_target = torch.arange(img_b).to(self.device)
|
1236 |
-
row_loss = F.cross_entropy(pooled_score, contrast_target)
|
1237 |
-
col_loss = F.cross_entropy(pooled_score.t(), contrast_target)
|
1238 |
-
losses.update({"loss_img_txt_level": (row_loss + col_loss) / 2.0}) # losses.update({"loss_img_txt_level": (row_loss + col_loss) / 4.0}) #
|
1239 |
-
|
1240 |
-
def focal_scaling(self, logits, targets, gamma=1.0):
|
1241 |
-
p = F.softmax(logits, dim=1)
|
1242 |
-
p_t = p[torch.arange(p.size(0)).to(p.device), targets] # get prob of target class
|
1243 |
-
weights = (1 - p_t) ** gamma
|
1244 |
-
return weights
|
1245 |
-
|
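For reference, the way these focal weights would modulate a per-region cross-entropy term (as in the commented-out line in region_concept_matching above) looks like this sketch:
import torch
import torch.nn.functional as F

def focal_weighted_ce(logits, targets, gamma=1.0):
    idx = torch.arange(logits.size(0), device=logits.device)
    p_t = F.softmax(logits, dim=1)[idx, targets]   # probability of the target class
    weights = (1 - p_t) ** gamma                   # down-weight easy (high-confidence) regions
    per_sample = F.cross_entropy(logits, targets, reduction="none")
    return (per_sample * weights).mean()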
1246 |
-
def get_psuedo_concept_labels(self, images, proposals, gt_instances, s_temp=0.01, norm=True, phrase_embs=None):
|
1247 |
-
""" Input images and region proposals, return matching results from teacher model
|
1248 |
-
"""
|
1249 |
-
with torch.no_grad():
|
1250 |
-
# extract visual features from teacher model
|
1251 |
-
features = self.teacher_backbone(images.tensor)
|
1252 |
-
teacher_region_feats = self.teacher_roi_heads(images, features, proposals, gt_instances, res5=self.teacher_backbone.layer4, attnpool=self.teacher_backbone.attnpool)
|
1253 |
-
# match teacher visual features with teacher concept embs to create pseudo labels
|
1254 |
-
if norm:
|
1255 |
-
teacher_region_feats = teacher_region_feats / teacher_region_feats.norm(dim=-1, keepdim=True)
|
1256 |
-
teacher_concept_emb = self.teacher_concept_emb / self.teacher_concept_emb.norm(dim=-1, keepdim=True)
|
1257 |
-
else:
|
1258 |
-
teacher_concept_emb = self.teacher_concept_emb
|
1259 |
-
concept_scores = teacher_region_feats @ teacher_concept_emb.t() # [#regions, #concepts]
|
1260 |
-
concept_scores = F.softmax(concept_scores / s_temp, dim=1)
|
1261 |
-
max_scores, max_inds = torch.max(concept_scores, dim=1)
|
1262 |
-
keep_regions = max_scores > self.concept_thres # only keep the regions that have high matching score with a concept
|
1263 |
-
if keep_regions.nonzero().size(0) == 0: # if all regions can't match to any concept
|
1264 |
-
print("all regions can't match to any concept!")
|
1265 |
-
keep_regions = max_scores > 0.0
|
1266 |
-
target_inds = max_inds[keep_regions]
|
1267 |
-
target_embs = self.concept_emb[target_inds] # the target embedding of student model
|
1268 |
-
label_mtx = (target_inds.view(-1, 1) == target_inds.view(1, -1)).type_as(teacher_region_feats)
|
1269 |
-
concept_scores = concept_scores[keep_regions]
|
1270 |
-
# matching kept regions with phrase-text to create labels
|
1271 |
-
if phrase_embs is None:
|
1272 |
-
phrase_label_mtx = None
|
1273 |
-
phrase_target_regions = None
|
1274 |
-
else:
|
1275 |
-
if norm:
|
1276 |
-
phrase_embs = phrase_embs / phrase_embs.norm(dim=-1, keepdim=True)
|
1277 |
-
teacher_kept_feats = teacher_region_feats[keep_regions]
|
1278 |
-
phrase_scores = phrase_embs @ teacher_kept_feats.t() # [#phrases, #keep regions]
|
1279 |
-
phrase_scores = F.softmax(phrase_scores / s_temp, dim=1)
|
1280 |
-
_, max_region_inds = torch.max(phrase_scores, dim=1)
|
1281 |
-
phrase_label_mtx = (max_region_inds.view(-1, 1) == max_region_inds.view(1, -1)).type_as(teacher_region_feats)
|
1282 |
-
phrase_target_regions = teacher_kept_feats[max_region_inds]
|
1283 |
-
|
1284 |
-
return concept_scores, target_inds, keep_regions, target_embs, label_mtx, phrase_label_mtx, phrase_target_regions
|
1285 |
-
|
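The core of the pseudo-labeling step above is: sharpen the teacher's region-to-concept scores with a low softmax temperature, keep only confidently matched regions, and build a binary matrix marking region pairs that share a concept. A stripped-down sketch with illustrative default values:
import torch
import torch.nn.functional as F

def pseudo_concept_labels(teacher_region_feats, concept_embs, thres=0.1, s_temp=0.01):
    # both inputs are assumed L2-normalized; shapes [R, D] and [C, D]
    scores = F.softmax(teacher_region_feats @ concept_embs.t() / s_temp, dim=1)  # [R, C]
    max_scores, max_inds = scores.max(dim=1)
    keep = max_scores > thres            # drop regions that match no concept confidently
    if keep.sum() == 0:                  # fall back to keeping everything
        keep = max_scores > 0.0
    target_inds = max_inds[keep]
    # regions sharing the same pseudo concept are treated as positives for each other
    label_mtx = (target_inds.view(-1, 1) == target_inds.view(1, -1)).float()
    return scores[keep], target_inds, keep, label_mtx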
1286 |
-
def get_region_features(self, images, features, proposals, gt_instances):
|
1287 |
-
""" Input images and region proposals, return region features
|
1288 |
-
"""
|
1289 |
-
# Given the proposals, crop region features from 2D image features
|
1290 |
-
if self.use_clip_c4: # use C4 + resnet weights from CLIP
|
1291 |
-
if self.use_clip_attpool: # use att_pool from CLIP to match dimension
|
1292 |
-
region_feats = self.roi_heads(images, features, proposals, gt_instances, res5=self.backbone.layer4, attnpool=self.backbone.attnpool)
|
1293 |
-
else: # use default mean pool
|
1294 |
-
region_feats = self.roi_heads(images, features, proposals, gt_instances, res5=self.backbone.layer4)
|
1295 |
-
else: # default setting
|
1296 |
-
region_feats = self.roi_heads(images, features, proposals, gt_instances)
|
1297 |
-
return region_feats
|
1298 |
-
|
1299 |
-
def get_region_proposals(self, batched_inputs):
|
1300 |
-
""" Given image, return object proposals
|
1301 |
-
"""
|
1302 |
-
if self.grid_regions: # use grid boxes
|
1303 |
-
proposals = self.create_grid_boxes(batched_inputs)
|
1304 |
-
else: # use object proposals
|
1305 |
-
with torch.no_grad():
|
1306 |
-
if self.clip_crop_region_type == "GLOBAL": # from a global box per image
|
1307 |
-
proposals = self.create_global_proposals(batched_inputs)
|
1308 |
-
elif self.clip_crop_region_type == "GRID": # from grid proposals
|
1309 |
-
proposals = self.create_grid_boxes(batched_inputs)
|
1310 |
-
elif self.clip_crop_region_type == "RANDOM": # from random proposals
|
1311 |
-
proposals = self.create_rand_boxes(batched_inputs)
|
1312 |
-
elif self.clip_crop_region_type == "RPN": # from the backbone & RPN of standard Mask-RCNN, trained on base classes
|
1313 |
-
if self.offline_backbone.training or self.offline_proposal_generator.training: # was set to True in training script
|
1314 |
-
self.offline_backbone.eval()
|
1315 |
-
self.offline_proposal_generator.eval()
|
1316 |
-
images = self.offline_preprocess_image(batched_inputs)
|
1317 |
-
features = self.offline_backbone(images.tensor)
|
1318 |
-
if self.offline_proposal_generator is not None:
|
1319 |
-
proposals, _ = self.offline_proposal_generator(images, features, None)
|
1320 |
-
#visualize_proposals(batched_inputs, proposals, self.input_format, vis_pretrain=True)
|
1321 |
-
# randomly select proposals to avoid overfitting
|
1322 |
-
if self.training:
|
1323 |
-
#rand_inds = [torch.arange(len(p))[:self.num_regions_per_img].to(self.device) for p in proposals]
|
1324 |
-
rand_inds = [torch.randperm(len(p))[:self.num_regions_per_img].to(self.device) for p in proposals]
|
1325 |
-
proposals = [p[rand_inds[i]] for i, p in enumerate(proposals)]
|
1326 |
-
return proposals
|
1327 |
-
|
1328 |
-
def offline_preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
1329 |
-
"""
|
1330 |
-
NOTE: the image tsv in pretraining are already normalized pixel values and thus opposite to Detectron2 default input.
|
1331 |
-
Normalize, pad and batch the input images. Use detectron2 default processing (pixel mean & std).
|
1332 |
-
Note: Due to FPN size_divisibility, images are padded by right/bottom border. So FPN is consistent with C4 and GT boxes.
|
1333 |
-
"""
|
1334 |
-
images = [x[0].to(self.device) for x in batched_inputs]
|
1335 |
-
if (self.input_format == 'RGB' and self.offline_input_format == 'BGR') or \
|
1336 |
-
(self.input_format == 'BGR' and self.offline_input_format == 'RGB'): # the input image follows the main config format ('RGB' or 'BGR')
|
1337 |
-
images = [x[[2,1,0],:,:] for x in images]
|
1338 |
-
if self.offline_div_pixel:
|
1339 |
-
images = [(x - self.offline_pixel_mean) / self.offline_pixel_std for x in images]
|
1340 |
-
else:
|
1341 |
-
images = [((x * 255.0) - self.offline_pixel_mean) / self.offline_pixel_std for x in images]
|
1342 |
-
images = ImageList.from_tensors(images, self.offline_backbone.size_divisibility)
|
1343 |
-
return images
|
1344 |
-
|
1345 |
-
def preprocess_image(self, batched_inputs: List[Dict[str, torch.Tensor]]):
|
1346 |
-
"""
|
1347 |
-
NOTE: the image tsv in pretraining are already normalized pixel values and thus opposite to Detectron2 default input.
|
1348 |
-
Normalize, pad and batch the input images. Use CLIP default processing (pixel mean & std).
|
1349 |
-
Note: Due to FPN size_divisibility, images are padded by right/bottom border. So FPN is consistent with C4 and GT boxes.
|
1350 |
-
"""
|
1351 |
-
images = [x[0].to(self.device) for x in batched_inputs]
|
1352 |
-
if self.div_pixel:
|
1353 |
-
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
|
1354 |
-
else:
|
1355 |
-
images = [((x * 255.0) - self.pixel_mean) / self.pixel_std for x in images]
|
1356 |
-
images = ImageList.from_tensors(images, self.backbone.size_divisibility)
|
1357 |
-
return images
|
1358 |
-
|
1359 |
-
def create_rand_boxes(self, batched_inputs, grid_length=8):
|
1360 |
-
""" create random boxes within an image, output random self.num_regions_per_img boxes
|
1361 |
-
return a list of Boxes
|
1362 |
-
"""
|
1363 |
-
images = self.preprocess_image(batched_inputs)
|
1364 |
-
image_height = images.tensor.size(2)
|
1365 |
-
image_width = images.tensor.size(3)
|
1366 |
-
|
1367 |
-
left_top_x = torch.tensor([i*(grid_length) for i in range(image_width // grid_length)])
|
1368 |
-
left_top_y = torch.tensor([i*(grid_length) for i in range(image_height // grid_length)])
|
1369 |
-
right_bot_x = torch.tensor([(i+1)*(grid_length) for i in range(image_width // grid_length)])
|
1370 |
-
right_bot_y = torch.tensor([(i+1)*(grid_length) for i in range(image_height // grid_length)])
|
1371 |
-
x_inds = torch.randint(0, left_top_x.size(0), (self.num_regions_per_img,))
|
1372 |
-
y_inds = torch.randint(0, left_top_y.size(0), (self.num_regions_per_img,))
|
1373 |
-
|
1374 |
-
proposals = []
|
1375 |
-
for i in range(self.num_regions_per_img):
|
1376 |
-
rb_x_candidates = right_bot_x[x_inds[i]:]
|
1377 |
-
rb_x = rb_x_candidates[torch.randperm(rb_x_candidates.size(0))[0]]
|
1378 |
-
rb_y_candidates = right_bot_y[y_inds[i]:]
|
1379 |
-
rb_y = rb_y_candidates[torch.randperm(rb_y_candidates.size(0))[0]]
|
1380 |
-
this_box = torch.cat((left_top_x[x_inds[i]].view(1,1), left_top_y[y_inds[i]].view(1,1), rb_x.view(1,1), rb_y.view(1,1)),dim=-1)
|
1381 |
-
proposals.append(this_box)
|
1382 |
-
proposals = torch.cat(proposals).float().to(self.device)
|
1383 |
-
proposals = [Boxes(proposals) for i in range(len(batched_inputs))] # a list of Boxes
|
1384 |
-
return proposals
|
1385 |
-
|
1386 |
-
def create_grid_boxes(self, batched_inputs, grid_length=32):
|
1387 |
-
""" create (image_height/32) * (image_width/32) pseudo grid boxes, and randomly sample self.num_regions_per_img boxes
|
1388 |
-
return a list of Boxes
|
1389 |
-
"""
|
1390 |
-
images = self.preprocess_image(batched_inputs)
|
1391 |
-
image_height = images.tensor.size(2)
|
1392 |
-
image_width = images.tensor.size(3)
|
1393 |
-
|
1394 |
-
left_top_x = torch.tensor([i*(grid_length) for i in range(image_width // grid_length)])
|
1395 |
-
left_top_y = torch.tensor([i*(grid_length) for i in range(image_height // grid_length)])
|
1396 |
-
right_bot_x = torch.tensor([(i+1)*(grid_length) for i in range(image_width // grid_length)])
|
1397 |
-
right_bot_y = torch.tensor([(i+1)*(grid_length) for i in range(image_height // grid_length)])
|
1398 |
-
left_top_x, left_top_y = torch.meshgrid(left_top_x, left_top_y)
|
1399 |
-
right_bot_x, right_bot_y = torch.meshgrid(right_bot_x, right_bot_y)
|
1400 |
-
grid_boxes = torch.cat((left_top_x.flatten().view(-1,1), left_top_y.flatten().view(-1,1),\
|
1401 |
-
right_bot_x.flatten().view(-1,1), right_bot_y.flatten().view(-1,1),), dim=1)
|
1402 |
-
sample_ind = torch.randperm(grid_boxes.size(0))[:self.num_regions_per_img]
|
1403 |
-
grid_boxes = grid_boxes[sample_ind]
|
1404 |
-
grid_boxes = grid_boxes.float().to(self.device)
|
1405 |
-
proposals = [Boxes(grid_boxes) for i in range(len(batched_inputs))] # a list of Boxes
|
1406 |
-
return proposals
|
1407 |
-
|
1408 |
-
def create_global_proposals(self, batched_inputs):
|
1409 |
-
""" create a single global box for an image, so as to extract global image features with RoIAlign on high-resolution images.
|
1410 |
-
"""
|
1411 |
-
images = self.preprocess_image(batched_inputs)
|
1412 |
-
image_height = images.tensor.size(2)
|
1413 |
-
image_width = images.tensor.size(3)
|
1414 |
-
|
1415 |
-
global_box = torch.tensor([0, 0, image_width, image_height]).view(1,4).float().to(self.device)
|
1416 |
-
proposals = [Boxes(global_box) for i in range(len(batched_inputs))] # a list of Boxes
|
1417 |
-
return proposals
|
1418 |
-
|
1419 |
-
def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
|
1420 |
-
"""
|
1421 |
-
Grounding inference: map region features with sentence tokens
|
1422 |
-
return: matching scores between region features and tokenized texts, region boxes in raw image resolution, image id & raw string texts & tokenized texts
|
1423 |
-
"""
|
1424 |
-
assert len(batched_inputs) == 1 # only one instance per image during inference
|
1425 |
-
gt_instances = None
|
1426 |
-
losses = {}
|
1427 |
-
|
1428 |
-
# localization branch: offline modules to get the region proposals
|
1429 |
-
proposals = self.get_region_proposals(batched_inputs)
|
1430 |
-
|
1431 |
-
# recognition branch: get 2D feature maps using the backbone of recognition branch
|
1432 |
-
images = self.preprocess_image(batched_inputs)
|
1433 |
-
features = self.backbone(images.tensor)
|
1434 |
-
region_feats = self.get_region_features(images, features, proposals, gt_instances)
|
1435 |
-
|
1436 |
-
# encode text
|
1437 |
-
num_cap = int(batched_inputs[0][1].size(0) / self.context_length)
|
1438 |
-
text = batched_inputs[0][1].view(num_cap, -1).to(self.device) # [num_cap, context_length]
|
1439 |
-
text_embs = self.lang_encoder.encode_text(text, only_eot=False) # [img_batch, n_ctx, transformer.width] or [img_batch, transformer.width]
|
1440 |
-
|
1441 |
-
# matching visual features with text embs
|
1442 |
-
region_feats = region_feats / region_feats.norm(dim=-1, keepdim=True)
|
1443 |
-
text_embs = text_embs / text_embs.norm(dim=-1, keepdim=True)
|
1444 |
-
match_scores = region_feats @ text_embs.view(-1, text_embs.size(-1)).t() # [#regions, img_batch * n_ctx]
|
1445 |
-
# visualize_proposals(batched_inputs, proposals, self.input_format, vis_pretrain=True)
|
1446 |
-
|
1447 |
-
# multiply RPN logits
|
1448 |
-
rpn_scores = [p.get('objectness_logits') for p in proposals][0]
|
1449 |
-
match_scores = (match_scores * rpn_scores[:, None]) ** 0.5
|
1450 |
-
|
1451 |
-
# scale the object proposals back to raw image resolution
|
1452 |
-
if do_postprocess:
|
1453 |
-
assert not torch.jit.is_scripting(), "Scripting is not supported for postprocess."
|
1454 |
-
processed_results = PretrainFastRCNN._postprocess(proposals, batched_inputs)
|
1455 |
-
return match_scores, processed_results
|
1456 |
-
|
1457 |
-
@staticmethod
|
1458 |
-
def _postprocess(instances, batched_inputs: List[Dict[str, torch.Tensor]]):
|
1459 |
-
"""
|
1460 |
-
Rescale the output instances to the target size.
|
1461 |
-
"""
|
1462 |
-
# note: private function; subject to changes
|
1463 |
-
processed_results = []
|
1464 |
-
for results_per_image, input_per_image in zip(instances, batched_inputs):
|
1465 |
-
height, width = input_per_image[-1][2] # original image size, before resizing
|
1466 |
-
r = detector_postprocess(results_per_image, height, width)
|
1467 |
-
processed_results.append({"instances": r})
|
1468 |
-
return processed_results
|
1469 |
-
|
1470 |
-
|
1471 |
-
def visualize_proposals(batched_inputs, proposals, input_format, vis_pretrain=False):
|
1472 |
-
"""
|
1473 |
-
A function used to visualize images and proposals. It shows ground truth
|
1474 |
-
bounding boxes on the original image and up to 20 top-scoring predicted
|
1475 |
-
object proposals on the original image. Users can implement different
|
1476 |
-
visualization functions for different models.
|
1477 |
-
|
1478 |
-
Args:
|
1479 |
-
batched_inputs (list): a list that contains input to the model.
|
1480 |
-
proposals (list): a list that contains predicted proposals. Both
|
1481 |
-
batched_inputs and proposals should have the same length.
|
1482 |
-
"""
|
1483 |
-
from detectron2.utils.visualizer import Visualizer
|
1484 |
-
|
1485 |
-
max_vis_prop = 50
|
1486 |
-
if vis_pretrain:
|
1487 |
-
for i, (input, prop) in enumerate(zip(batched_inputs, proposals)):
|
1488 |
-
img = input[0] * 255.0
|
1489 |
-
img = convert_image_to_rgb(img.permute(1, 2, 0), input_format)
|
1490 |
-
box_size = min(len(prop.proposal_boxes), max_vis_prop)
|
1491 |
-
v_pred = Visualizer(img, None)
|
1492 |
-
v_pred = v_pred.overlay_instances(
|
1493 |
-
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
|
1494 |
-
)
|
1495 |
-
prop_img = v_pred.get_image()
|
1496 |
-
vis_img = prop_img
|
1497 |
-
to_save = Image.fromarray(np.array(vis_img, np.uint8))
|
1498 |
-
#to_save.save("output/regions/" + str(i) + ".png")
|
1499 |
-
#break # only visualize one image in a batch
|
1500 |
-
else:
|
1501 |
-
for input, prop in zip(batched_inputs, proposals):
|
1502 |
-
img = input["image"]
|
1503 |
-
img = convert_image_to_rgb(img.permute(1, 2, 0), input_format)
|
1504 |
-
box_size = min(len(prop.proposal_boxes), max_vis_prop)
|
1505 |
-
v_pred = Visualizer(img, None)
|
1506 |
-
v_pred = v_pred.overlay_instances(
|
1507 |
-
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
|
1508 |
-
)
|
1509 |
-
prop_img = v_pred.get_image()
|
1510 |
-
vis_img = prop_img
|
1511 |
-
# f_n = input['file_name']
|
1512 |
-
to_save = Image.fromarray(np.array(vis_img, np.uint8))
|
1513 |
-
#to_save.save("output/regions/" + "proposals.png")
|
1514 |
-
#break # only visualize one image in a batch
|
1515 |
-
|
1516 |
-
def visualize_results(batched_inputs, results, input_format, vis_pretrain=False):
|
1517 |
-
"""
|
1518 |
-
A function used to visualize images and results. It shows ground truth
|
1519 |
-
bounding boxes on the original image and up to 20 top-scoring predicted
|
1520 |
-
object results on the original image. Users can implement different
|
1521 |
-
visualization functions for different models.
|
1522 |
-
|
1523 |
-
Args:
|
1524 |
-
batched_inputs (list): a list that contains input to the model.
|
1525 |
-
results (list): a list that contains predicted results. Both
|
1526 |
-
batched_inputs and results should have the same length.
|
1527 |
-
"""
|
1528 |
-
from detectron2.utils.visualizer import Visualizer
|
1529 |
-
|
1530 |
-
max_vis_prop = 1
|
1531 |
-
if vis_pretrain:
|
1532 |
-
for i, (input, prop) in enumerate(zip(batched_inputs, results)):
|
1533 |
-
img = input[0] * 255.0
|
1534 |
-
img = convert_image_to_rgb(img.permute(1, 2, 0), input_format)
|
1535 |
-
box_size = min(len(prop.proposal_boxes), max_vis_prop)
|
1536 |
-
v_pred = Visualizer(img, None)
|
1537 |
-
v_pred = v_pred.overlay_instances(
|
1538 |
-
boxes=prop.proposal_boxes[0:box_size].tensor.cpu().numpy()
|
1539 |
-
)
|
1540 |
-
prop_img = v_pred.get_image()
|
1541 |
-
vis_img = prop_img
|
1542 |
-
to_save = Image.fromarray(np.array(vis_img, np.uint8))
|
1543 |
-
#to_save.save("output/regions/" + str(i) + ".png")
|
1544 |
-
#break # only visualize one image in a batch
|
1545 |
-
else:
|
1546 |
-
for input, prop in zip(batched_inputs, results):
|
1547 |
-
img = input["image"]
|
1548 |
-
img = convert_image_to_rgb(img.permute(1, 2, 0), input_format)
|
1549 |
-
box_size = min(len(prop.pred_boxes), max_vis_prop)
|
1550 |
-
v_pred = Visualizer(img, None)
|
1551 |
-
v_pred = v_pred.overlay_instances(
|
1552 |
-
boxes=prop.pred_boxes[0:box_size].tensor.cpu().numpy()
|
1553 |
-
)
|
1554 |
-
prop_img = v_pred.get_image()
|
1555 |
-
vis_img = prop_img
|
1556 |
-
# f_n = input['file_name']
|
1557 |
-
to_save = Image.fromarray(np.array(vis_img, np.uint8))
|
1558 |
-
#to_save.save("output/regions/" + "results.png")
|
1559 |
-
#break # only visualize one image in a batch
|
1560 |
-
return to_save
spaces/ClearLove443/Robby-chatbot/setup.sh
DELETED
@@ -1,21 +0,0 @@
mkdir -p ~/.streamlit/

echo "\
[general]\n\
email = \"[email protected]\"\n\
" > ~/.streamlit/credentials.toml

echo "\
[server]\n\
headless = true\n\
enableCORS=false\n\
port = $PORT\n\
\n\
[theme]\n\
base = \"light\"\n\
primaryColor = \"#89CFF0\"\n\
backgroundColor = \"#E0F7FE\"\n\
secondaryBackgroundColor = \"#FFFCE4\"\n\
textColor = \"#000000\"\n\
font = \"sans serif\"\n\
" > ~/.streamlit/config.toml
spaces/CofAI/njpad/style.css
DELETED
@@ -1,28 +0,0 @@
body {
  padding: 2rem;
  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
}

h1 {
  font-size: 16px;
  margin-top: 0;
}

p {
  color: rgb(107, 114, 128);
  font-size: 15px;
  margin-bottom: 10px;
  margin-top: 5px;
}

.card {
  max-width: 620px;
  margin: 0 auto;
  padding: 16px;
  border: 1px solid lightgray;
  border-radius: 16px;
}

.card p:last-child {
  margin-bottom: 0;
}
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/api.py
DELETED
@@ -1,170 +0,0 @@
# based on https://github.com/isl-org/MiDaS

import cv2
import torch
import torch.nn as nn
from torchvision.transforms import Compose

from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
from ldm.modules.midas.midas.midas_net import MidasNet
from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet


ISL_PATHS = {
    "dpt_large": "midas_models/dpt_large-midas-2f21e586.pt",
    "dpt_hybrid": "midas_models/dpt_hybrid-midas-501f0c75.pt",
    "midas_v21": "",
    "midas_v21_small": "",
}


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def load_midas_transform(model_type):
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load transform only
    if model_type == "dpt_large": # DPT-Large
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_hybrid": # DPT-Hybrid
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "midas_v21":
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    elif model_type == "midas_v21_small":
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    else:
        assert False, f"model_type '{model_type}' not implemented, use: --model_type large"

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    return transform


def load_model(model_type):
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load network
    model_path = ISL_PATHS[model_type]
    if model_type == "dpt_large": # DPT-Large
        model = DPTDepthModel(
            path=model_path,
            backbone="vitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_hybrid": # DPT-Hybrid
        model = DPTDepthModel(
            path=model_path,
            backbone="vitb_rn50_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "midas_v21":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    elif model_type == "midas_v21_small":
        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
                               non_negative=True, blocks={'expand': True})
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    else:
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=True,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    return model.eval(), transform


class MiDaSInference(nn.Module):
    MODEL_TYPES_TORCH_HUB = [
        "DPT_Large",
        "DPT_Hybrid",
        "MiDaS_small"
    ]
    MODEL_TYPES_ISL = [
        "dpt_large",
        "dpt_hybrid",
        "midas_v21",
        "midas_v21_small",
    ]

    def __init__(self, model_type):
        super().__init__()
        assert (model_type in self.MODEL_TYPES_ISL)
        model, _ = load_model(model_type)
        self.model = model
        self.model.train = disabled_train

    def forward(self, x):
        # x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
        # NOTE: we expect that the correct transform has been called during dataloading.
        with torch.no_grad():
            prediction = self.model(x)
            prediction = torch.nn.functional.interpolate(
                prediction.unsqueeze(1),
                size=x.shape[2:],
                mode="bicubic",
                align_corners=False,
            )
        assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
        return prediction
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/img.py
DELETED
@@ -1,521 +0,0 @@
|
|
1 |
-
#coding=utf-8
|
2 |
-
'''
|
3 |
-
@author: dengdan
|
4 |
-
'''
|
5 |
-
import cv2
|
6 |
-
import numpy as np
|
7 |
-
import logging
|
8 |
-
import math
|
9 |
-
import event
|
10 |
-
import util
|
11 |
-
|
12 |
-
IMREAD_GRAY = 0
|
13 |
-
IMREAD_COLOR = 1
|
14 |
-
IMREAD_UNCHANGED = -1
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
COLOR_WHITE =(255, 255, 255)
|
19 |
-
COLOR_BLACK = (0, 0, 0)
|
20 |
-
COLOR_GREEN = (0, 255, 0)
|
21 |
-
|
22 |
-
COLOR_RGB_RED = (255, 0, 0)
|
23 |
-
COLOR_BGR_RED = (0, 0, 255)
|
24 |
-
|
25 |
-
COLOR_RGB_BLUE = (0, 0, 255)
|
26 |
-
COLOR_BGR_BLUE = (255, 0, 0)
|
27 |
-
|
28 |
-
COLOR_RGB_YELLOW = (255, 255, 0)
|
29 |
-
COLOR_BGR_YELLOW = (0, 255, 255)
|
30 |
-
|
31 |
-
|
32 |
-
COLOR_RGB_GRAY = (47, 79, 79)
|
33 |
-
|
34 |
-
COLOR_RGB_PINK = (255, 192, 203)
|
35 |
-
def imread(path, rgb = False, mode = cv2.IMREAD_COLOR):
|
36 |
-
path = util.io.get_absolute_path(path)
|
37 |
-
img = cv2.imread(path, mode)
|
38 |
-
if img is None:
|
39 |
-
raise IOError('File not found:%s'%(path))
|
40 |
-
|
41 |
-
if rgb:
|
42 |
-
img = bgr2rgb(img)
|
43 |
-
return img
|
44 |
-
|
45 |
-
def imshow(winname, img, block = True, position = None, maximized = False, rgb = False):
|
46 |
-
if isinstance(img, str):
|
47 |
-
img = imread(path = img)
|
48 |
-
|
49 |
-
cv2.namedWindow(winname, cv2.WINDOW_NORMAL)
|
50 |
-
if rgb:
|
51 |
-
img = rgb2bgr(img)
|
52 |
-
cv2.imshow(winname, img)
|
53 |
-
if position is not None:
|
54 |
-
# cv2.moveWindow(winname, position[0], position[1])
|
55 |
-
move_win(winname, position)
|
56 |
-
|
57 |
-
if maximized:
|
58 |
-
maximize_win(winname)
|
59 |
-
|
60 |
-
|
61 |
-
if block:
|
62 |
-
# cv2.waitKey(0)
|
63 |
-
event.wait_key(" ")
|
64 |
-
cv2.destroyAllWindows()
|
65 |
-
|
66 |
-
|
67 |
-
def imwrite(path, img, rgb = False):
|
68 |
-
if rgb:
|
69 |
-
img = rgb2bgr(img)
|
70 |
-
path = util.io.get_absolute_path(path)
|
71 |
-
util.io.make_parent_dir(path)
|
72 |
-
cv2.imwrite(path, img)
|
73 |
-
|
74 |
-
def move_win(winname, position = (0, 0)):
|
75 |
-
"""
|
76 |
-
move pyplot window
|
77 |
-
"""
|
78 |
-
cv2.moveWindow(winname, position[0], position[1])
|
79 |
-
|
80 |
-
def maximize_win(winname):
|
81 |
-
cv2.setWindowProperty(winname, cv2.WND_PROP_FULLSCREEN, True);
|
82 |
-
|
83 |
-
def eq_color(target, color):
|
84 |
-
for i, c in enumerate(color):
|
85 |
-
if target[i] != color[i]:
|
86 |
-
return False
|
87 |
-
return True
|
88 |
-
|
89 |
-
def is_white(color):
|
90 |
-
for c in color:
|
91 |
-
if c < 255:
|
92 |
-
return False
|
93 |
-
return True
|
94 |
-
|
95 |
-
def black(shape):
|
96 |
-
if len(np.shape(shape)) >= 2:
|
97 |
-
shape = get_shape(shape)
|
98 |
-
shape = [int(v) for v in shape]
|
99 |
-
return np.zeros(shape, np.uint8)
|
100 |
-
|
101 |
-
def white(shape, value = 255):
|
102 |
-
if len(np.shape(shape)) >= 2:
|
103 |
-
shape = get_shape(shape)
|
104 |
-
return np.ones(shape, np.uint8) * np.uint8(value)
|
105 |
-
|
106 |
-
def bgr2rgb(img):
|
107 |
-
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
|
108 |
-
|
109 |
-
def rgb2bgr(img):
|
110 |
-
return cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
|
111 |
-
|
112 |
-
|
113 |
-
def rgb2gray(img):
|
114 |
-
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
|
115 |
-
|
116 |
-
def bgr2gray(img):
|
117 |
-
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
118 |
-
|
119 |
-
|
120 |
-
def ds_size(image_size, kernel_size, stride):
|
121 |
-
"""calculate the size of downsampling result"""
|
122 |
-
image_x, image_y = image_size
|
123 |
-
|
124 |
-
|
125 |
-
kernel_x, kernel_y = kernel_size
|
126 |
-
stride_x, stride_y = stride
|
127 |
-
|
128 |
-
def f(iw, kw, sw):
|
129 |
-
return int(np.floor((iw - kw) / sw) + 1)
|
130 |
-
|
131 |
-
output_size = (f(image_x, kernel_x, stride_x), f(image_y, kernel_y, stride_y))
|
132 |
-
return output_size
|
133 |
-
|
134 |
-
|
135 |
-
|
136 |
-
def get_roi(img, p1, p2):
|
137 |
-
"""
|
138 |
-
extract region of interest from an image.
|
139 |
-
p1, p2: two tuples standing for two opposite corners of the rectangle bounding the roi.
|
140 |
-
Their order is arbitrary.
|
141 |
-
"""
|
142 |
-
x1, y1 = p1
|
143 |
-
x2, y2 = p2
|
144 |
-
|
145 |
-
x_min = min([x1, x2])
|
146 |
-
y_min = min([y1, y2])
|
147 |
-
x_max = max([x1, x2]) + 1
|
148 |
-
y_max = max([y1, y2]) + 1
|
149 |
-
|
150 |
-
return img[y_min: y_max, x_min: x_max]
|
151 |
-
|
152 |
-
def rectangle(img, left_up, right_bottom, color, border_width = 1):
|
153 |
-
left_up = (int(left_up[0]), int(left_up[1]))
|
154 |
-
right_bottom = (int(right_bottom[0]), int(right_bottom[1]))
|
155 |
-
cv2.rectangle(img, left_up, right_bottom, color, border_width)
|
156 |
-
|
157 |
-
|
158 |
-
def circle(img, center, r, color, border_width = 1):
|
159 |
-
center = (int(center[0]), int(center[1]))
|
160 |
-
cv2.circle(img, center, r, color, border_width)
|
161 |
-
|
162 |
-
def render_points(img, points, color):
|
163 |
-
for p in points:
|
164 |
-
x, y = p
|
165 |
-
img[y][x] = color
|
166 |
-
|
167 |
-
|
168 |
-
def draw_contours(img, contours, idx = -1, color = 1, border_width = 1):
|
169 |
-
# img = img.copy()
|
170 |
-
cv2.drawContours(img, contours, idx, color, border_width)
|
171 |
-
return img
|
172 |
-
|
173 |
-
def get_contour_rect_box(contour):
|
174 |
-
x,y,w,h = cv2.boundingRect(contour)
|
175 |
-
return x, y, w, h
|
176 |
-
|
177 |
-
def get_contour_region_in_rect(img, contour):
|
178 |
-
x, y, w, h = get_contour_rect_box(contour)
|
179 |
-
lu, rb = (x, y), (x + w, y + h)
|
180 |
-
return get_roi(img, lu, rb)
|
181 |
-
|
182 |
-
def get_contour_min_area_box(contour):
|
183 |
-
rect = cv2.minAreaRect(contour)
|
184 |
-
box = cv2.cv.BoxPoints(rect)
|
185 |
-
box = np.int0(box)
|
186 |
-
return box
|
187 |
-
|
188 |
-
def get_contour_region_in_min_area_rect(img, cnt):
|
189 |
-
# find the min area rect of contour
|
190 |
-
rect = cv2.minAreaRect(cnt)
|
191 |
-
angle = rect[-1]
|
192 |
-
box = cv2.cv.BoxPoints(rect)
|
193 |
-
box_cnt = points_to_contour(box)
|
194 |
-
|
195 |
-
# find the rectangle containing box_cnt, and set it as ROI
|
196 |
-
outer_rect = get_contour_rect_box(box_cnt)
|
197 |
-
x, y, w, h = outer_rect
|
198 |
-
img = get_roi(img, (x, y), (x + w, y + h))
|
199 |
-
box = [(ox - x, oy - y) for (ox, oy) in box]
|
200 |
-
|
201 |
-
# rotate ROI and corner points
|
202 |
-
rows, cols = get_shape(img)
|
203 |
-
M = cv2.getRotationMatrix2D((cols/2,rows/2), angle, scale = 1)
|
204 |
-
dst = cv2.warpAffine(img,M,(cols,rows))
|
205 |
-
bar_xy = np.hstack((box, np.ones((4, 1))))
|
206 |
-
new_corners = np.dot(M, np.transpose(bar_xy))
|
207 |
-
new_corners = util.dtype.int(np.transpose(new_corners))
|
208 |
-
# cnt = points_to_contour(new_corners)
|
209 |
-
|
210 |
-
xs = new_corners[:, 0]
|
211 |
-
ys = new_corners[:, 1]
|
212 |
-
lu = (min(xs), min(ys))
|
213 |
-
rb = (max(xs), max(ys))
|
214 |
-
return get_roi(dst, lu, rb)
|
215 |
-
|
216 |
-
|
217 |
-
def contour_to_points(contour):
|
218 |
-
return np.asarray([c[0] for c in contour])
|
219 |
-
|
220 |
-
|
221 |
-
def points_to_contour(points):
|
222 |
-
contours = [[list(p)]for p in points]
|
223 |
-
return np.asarray(contours, dtype = np.int32)
|
224 |
-
|
225 |
-
def points_to_contours(points):
|
226 |
-
return np.asarray([points_to_contour(points)])
|
227 |
-
|
228 |
-
def get_contour_region_iou(I, cnt1, cnt2):
|
229 |
-
"""
|
230 |
-
calculate the iou of two contours
|
231 |
-
"""
|
232 |
-
mask1 = util.img.black(I)
|
233 |
-
draw_contours(mask1, [cnt1], color = 1, border_width = -1)
|
234 |
-
|
235 |
-
mask2 = util.img.black(I)
|
236 |
-
draw_contours(mask2, [cnt2], color = 1, border_width = -1)
|
237 |
-
|
238 |
-
union_mask = ((mask1 + mask2) >=1) * 1
|
239 |
-
intersect_mask = (mask1 * mask2 >= 1) * 1
|
240 |
-
|
241 |
-
return np.sum(intersect_mask) * 1.0 / np.sum(union_mask)
|
242 |
-
|
243 |
-
|
244 |
-
def fill_bbox(img, box, color = 1):
|
245 |
-
"""
|
246 |
-
filling a bounding box with color.
|
247 |
-
box: a list of 4 points, in clockwise order, as the four vertice of a bounding box
|
248 |
-
"""
|
249 |
-
util.test.assert_equal(np.shape(box), (4, 2))
|
250 |
-
cnt = to_contours(box)
|
251 |
-
draw_contours(img, cnt, color = color, border_width = -1)
|
252 |
-
|
253 |
-
def get_rect_points(left_up, right_bottom):
|
254 |
-
"""
|
255 |
-
given the left up and right bottom points of a rectangle, return its four points
|
256 |
-
"""
|
257 |
-
right_bottom, left_up = np.asarray(right_bottom), np.asarray(left_up)
|
258 |
-
w, h = right_bottom - left_up
|
259 |
-
x, y = left_up
|
260 |
-
points = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)]
|
261 |
-
return points
|
262 |
-
|
263 |
-
def rect_perimeter(left_up, right_bottom):
|
264 |
-
"""
|
265 |
-
calculate the perimeter of the rectangle described by its left-up and right-bottom point.
|
266 |
-
"""
|
267 |
-
return sum(np.asarray(right_bottom) - np.asarray(left_up)) * 2
|
268 |
-
|
269 |
-
def rect_area(left_up, right_bottom):
|
270 |
-
wh = np.asarray(right_bottom) - np.asarray(left_up) + 1
|
271 |
-
return np.prod(wh)
|
272 |
-
|
273 |
-
def apply_mask(img, mask):
|
274 |
-
"""
|
275 |
-
the img will be masked in place.
|
276 |
-
"""
|
277 |
-
c = np.shape(img)[-1]
|
278 |
-
for i in range(c):
|
279 |
-
img[:, :, i] = img[:, :, i] * mask
|
280 |
-
return img
|
281 |
-
|
282 |
-
def get_shape(img):
|
283 |
-
"""
|
284 |
-
return the height and width of an image
|
285 |
-
"""
|
286 |
-
return np.shape(img)[0:2]
|
287 |
-
|
288 |
-
def get_wh(img):
|
289 |
-
return np.shape(img)[0:2][::-1]
|
290 |
-
|
291 |
-
def get_value(img, x, y = None):
|
292 |
-
if y == None:
|
293 |
-
y = x[1]
|
294 |
-
x = x[0]
|
295 |
-
|
296 |
-
return img[y][x]
|
297 |
-
|
298 |
-
def set_value(img, xy, val):
|
299 |
-
x, y = xy
|
300 |
-
img[y][x] = val
|
301 |
-
|
302 |
-
|
303 |
-
def filter2D(img, kernel):
|
304 |
-
dst = cv2.filter2D(img, -1, kernel)
|
305 |
-
return dst
|
306 |
-
|
307 |
-
def average_blur(img, shape = (5, 5)):
|
308 |
-
return cv2.blur(img, shape)
|
309 |
-
|
310 |
-
def gaussian_blur(img, shape = (5, 5), sigma = 0):
|
311 |
-
# sigma --> sigmaX, sigmaY
|
312 |
-
blur = cv2.GaussianBlur(img,shape, sigma)
|
313 |
-
return blur
|
314 |
-
|
315 |
-
def bilateral_blur(img, d = 9, sigmaColor = 75, sigmaSpace = 75):
|
316 |
-
dst = cv2.bilateralFilter(img, d, sigmaColor, sigmaSpace)
|
317 |
-
return dst
|
318 |
-
|
319 |
-
BLUR_AVERAGE = 'average'
|
320 |
-
BLUR_GAUSSIAN = 'gaussian'
|
321 |
-
BLUR_BILATERAL = 'bilateral'
|
322 |
-
|
323 |
-
|
324 |
-
_blur_dict = {
|
325 |
-
BLUR_AVERAGE: average_blur,
|
326 |
-
BLUR_GAUSSIAN: gaussian_blur,
|
327 |
-
BLUR_BILATERAL: bilateral_blur
|
328 |
-
}
|
329 |
-
|
330 |
-
def blur(img, blur_type):
|
331 |
-
fn = _blur_dict[blur_type]
|
332 |
-
return fn(img)
|
333 |
-
|
334 |
-
def put_text(img, text, pos, scale = 1, color = COLOR_WHITE, thickness = 1):
|
335 |
-
pos = np.int32(pos)
|
336 |
-
font = cv2.FONT_HERSHEY_SIMPLEX
|
337 |
-
cv2.putText(img = img, text = text, org = tuple(pos), fontFace = font, fontScale = scale, color = color, thickness = thickness)
|
338 |
-
|
339 |
-
def resize(img, f = None, fx = None, fy = None, size = None, interpolation = cv2.INTER_LINEAR):
|
340 |
-
"""
|
341 |
-
size: (w, h)
|
342 |
-
"""
|
343 |
-
h, w = get_shape(img)
|
344 |
-
if fx != None and fy != None:
|
345 |
-
return cv2.resize(img, None, fx = fx, fy = fy, interpolation = interpolation)
|
346 |
-
|
347 |
-
if size != None:
|
348 |
-
size = util.dtype.int(size)
|
349 |
-
# size = (size[1], size[0])
|
350 |
-
size = tuple(size)
|
351 |
-
return cv2.resize(img, size, interpolation = interpolation)
|
352 |
-
|
353 |
-
return cv2.resize(img, None, fx = f, fy = f, interpolation = interpolation)
|
354 |
-
|
355 |
-
def translate(img, delta_x, delta_y, size = None):
|
356 |
-
M = np.float32([[1,0, delta_x],[0,1, delta_y]])
|
357 |
-
if size == None:
|
358 |
-
size = get_wh(img)
|
359 |
-
|
360 |
-
dst = cv2.warpAffine(img,M, size)
|
361 |
-
return dst
|
362 |
-
|
363 |
-
|
364 |
-
def rotate_about_center(src, angle, scale=1.):
|
365 |
-
"""https://www.oschina.net/translate/opencv-rotation"""
|
366 |
-
w = src.shape[1]
|
367 |
-
h = src.shape[0]
|
368 |
-
rangle = np.deg2rad(angle) # angle in radians
|
369 |
-
# now calculate new image width and height
|
370 |
-
nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w))*scale
|
371 |
-
nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w))*scale
|
372 |
-
# ask OpenCV for the rotation matrix
|
373 |
-
rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
|
374 |
-
# calculate the move from the old center to the new center combined
|
375 |
-
# with the rotation
|
376 |
-
rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5,0]))
|
377 |
-
# the move only affects the translation, so update the translation
|
378 |
-
# part of the transform
|
379 |
-
rot_mat[0,2] += rot_move[0]
|
380 |
-
rot_mat[1,2] += rot_move[1]
|
381 |
-
return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh))), flags=cv2.INTER_LANCZOS4), rot_mat
|
382 |
-
|
383 |
-
|
384 |
-
def get_rect_iou(rects1, rects2):
|
385 |
-
"""
|
386 |
-
calculate the iou between rects1 and rects2
|
387 |
-
each rect consists of four points:[min_x, min_y, max_x, max_y]
|
388 |
-
return: a iou matrix, len(rects1) * len(rects2)
|
389 |
-
"""
|
390 |
-
rects1, rects2 = np.asarray(rects1), np.asarray(rects2)
|
391 |
-
|
392 |
-
def _to_matrix(p, ps):
|
393 |
-
p = np.ones((len(ps), 1)) * p
|
394 |
-
ps = np.reshape(ps, (len(ps), 1))
|
395 |
-
temp =np.hstack([p, ps])
|
396 |
-
return temp
|
397 |
-
|
398 |
-
def _get_max(p, ps):
|
399 |
-
return np.max(_to_matrix(p, ps), axis = 1)
|
400 |
-
|
401 |
-
def _get_min(p, ps):
|
402 |
-
return np.min(_to_matrix(p, ps), axis = 1)
|
403 |
-
|
404 |
-
|
405 |
-
def _get_area(rect):
|
406 |
-
w, h = rect[:, 2] - rect[:, 0] + 1.0 , rect[:, 3] - rect[:, 1] + 1.0
|
407 |
-
return w * h
|
408 |
-
|
409 |
-
def _get_inter(rect1, rects2):
|
410 |
-
x1 = _get_max(rect1[0], rects2[:, 0])
|
411 |
-
y1 = _get_max(rect1[1], rects2[:, 1])
|
412 |
-
|
413 |
-
x2 = _get_min(rect1[2], rects2[:, 2])
|
414 |
-
y2 = _get_min(rect1[3], rects2[:, 3])
|
415 |
-
|
416 |
-
w,h = x2-x1 +1, y2 - y1 + 1
|
417 |
-
areas = w * h
|
418 |
-
areas[np.where(w < 0)] = 0
|
419 |
-
areas[np.where(h < 0)] = 0
|
420 |
-
return areas
|
421 |
-
|
422 |
-
area2 = _get_area(rects2)
|
423 |
-
area1 = _get_area(rects1)
|
424 |
-
iou = np.zeros((len(rects1), len(rects2)))
|
425 |
-
for ri in range(len(rects1)):
|
426 |
-
inter = _get_inter(rects1[ri, :], rects2)
|
427 |
-
union = area1[ri] + area2 - inter
|
428 |
-
iou[ri, :] = np.transpose( inter / union)
|
429 |
-
return iou
|
430 |
-
|
431 |
-
def find_contours(mask):
|
432 |
-
mask = np.asarray(mask, dtype = np.uint8)
|
433 |
-
mask = mask.copy()
|
434 |
-
contours, _ = cv2.findContours(mask, mode = cv2.RETR_CCOMP,
|
435 |
-
method = cv2.CHAIN_APPROX_SIMPLE)
|
436 |
-
return contours
|
437 |
-
|
438 |
-
def find_two_level_contours(mask):
|
439 |
-
mask = mask.copy()
|
440 |
-
contours, tree = cv2.findContours(mask, mode = cv2.RETR_CCOMP,
|
441 |
-
method = cv2.CHAIN_APPROX_SIMPLE)
|
442 |
-
return contours, tree
|
443 |
-
|
444 |
-
|
445 |
-
def is_in_contour(point, cnt):
|
446 |
-
"""tell whether a point is in contour or not.
|
447 |
-
In-contour here includes both the 'in contour' and 'on contour' cases.
|
448 |
-
point:(x, y)
|
449 |
-
cnt: a cv2 contour
|
450 |
-
"""
|
451 |
-
# doc of pointPolygonTest: http://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=pointpolygontest#cv.PointPolygonTest
|
452 |
-
# the last argument means only tell if in or not, without calculating the shortest distance
|
453 |
-
in_cnt = cv2.pointPolygonTest(cnt, point, False)
|
454 |
-
return in_cnt >= 0;
|
455 |
-
|
456 |
-
def convex_hull(contour):
|
457 |
-
hull = cv2.convexHull(contour, returnPoints=1)
|
458 |
-
return hull
|
459 |
-
|
460 |
-
def random_color_3():
|
461 |
-
c = util.rand.randint(low = 0, high = 255, shape = (3, ))
|
462 |
-
# c = np.uint8(c)
|
463 |
-
return c
|
464 |
-
|
465 |
-
def get_contour_area(cnt):
|
466 |
-
return cv2.contourArea(cnt)
|
467 |
-
|
468 |
-
def is_valid_jpg(jpg_file):
|
469 |
-
with open(jpg_file, 'rb') as f:
|
470 |
-
f.seek(-2, 2)
|
471 |
-
return f.read() == '\xff\xd9'
|
472 |
-
|
473 |
-
|
474 |
-
|
475 |
-
def rotate_point_by_90(x, y, k, w = 1.0, h = 1.0):
|
476 |
-
"""
|
477 |
-
Rotate a point xy on an image by k * 90
|
478 |
-
degrees.
|
479 |
-
Params:
|
480 |
-
x, y: a point, (x, y). If not normalized within 0 and 1, the
|
481 |
-
width and height of the image should be specified clearly.
|
482 |
-
w, h: the width and height of image
|
483 |
-
k: k * 90 degrees will be rotated
|
484 |
-
"""
|
485 |
-
k = k % 4
|
486 |
-
|
487 |
-
if k == 0:
|
488 |
-
return x, y
|
489 |
-
elif k == 1:
|
490 |
-
return y, w - x
|
491 |
-
elif k == 2:
|
492 |
-
return w - x, h - y
|
493 |
-
elif k == 3:
|
494 |
-
return h - y, x
|
495 |
-
|
496 |
-
|
497 |
-
def min_area_rect(xs, ys):
|
498 |
-
"""
|
499 |
-
Args:
|
500 |
-
xs: numpy ndarray with shape=(N,4). N is the number of oriented bboxes. 4 contains [x1, x2, x3, x4]
|
501 |
-
ys: numpy ndarray with shape=(N,4), [y1, y2, y3, y4]
|
502 |
-
Note that [(x1, y1), (x2, y2), (x3, y3), (x4, y4)] can represent an oriented bbox.
|
503 |
-
Return:
|
504 |
-
the oriented rects sorrounding the box, in the format:[cx, cy, w, h, theta].
|
505 |
-
"""
|
506 |
-
xs = np.asarray(xs, dtype = np.float32)
|
507 |
-
ys = np.asarray(ys, dtype = np.float32)
|
508 |
-
|
509 |
-
num_rects = xs.shape[0]
|
510 |
-
box = np.empty((num_rects, 5))#cx, cy, w, h, theta
|
511 |
-
for idx in xrange(num_rects):
|
512 |
-
points = zip(xs[idx, :], ys[idx, :])
|
513 |
-
cnt = points_to_contour(points)
|
514 |
-
rect = cv2.minAreaRect(cnt)
|
515 |
-
cx, cy = rect[0]
|
516 |
-
w, h = rect[1]
|
517 |
-
theta = rect[2]
|
518 |
-
box[idx, :] = [cx, cy, w, h, theta]
|
519 |
-
|
520 |
-
box = np.asarray(box, dtype = xs.dtype)
|
521 |
-
return box
|
spaces/DHEIVER/Segmento_de_Angio_Coronariana_v6/obstruction_detector.py
DELETED
@@ -1,55 +0,0 @@
import cv2 as cv
import numpy as np
from scipy.signal import find_peaks
from PIL import Image # Import PIL

class ObstructionDetector:
    def __init__(self, threshold=500):
        self.threshold = threshold

    def preprocess_image(self, image):
        # Convert the image to grayscale if it's a color image
        if len(image.shape) == 3:
            image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)

        # Apply Gaussian blur to reduce noise
        preprocessed_image = cv.GaussianBlur(image, (5, 5), 0)

        # Perform other preprocessing steps as needed (e.g., contrast adjustment, histogram equalization)

        return preprocessed_image

    def plot_histogram(self, image):
        # Calculate the histogram
        histogram = cv.calcHist([image], [0], None, [256], [0, 256])

        # Smoothing the histogram using a simple moving average (window size = 5)
        kernel = np.ones((5, 1)) / 5
        smoothed_histogram = cv.filter2D(histogram, -1, kernel)

        return smoothed_histogram

    def count_histogram_peaks(self, smoothed_histogram):
        # Find peaks in the smoothed histogram with frequency greater than the threshold
        peaks, _ = find_peaks(smoothed_histogram.flatten(), height=self.threshold)
        return peaks

    def detect_obstruction(self, pil_image): # Accept PIL image directly
        # Convert PIL image to NumPy array
        img = np.array(pil_image)

        # Preprocess the image
        preprocessed_img = self.preprocess_image(img)

        # Count the number of peaks in the smoothed histogram above the threshold
        smoothed_histogram = self.plot_histogram(preprocessed_img)
        peaks = self.count_histogram_peaks(smoothed_histogram)

        # Check if peaks are too close together
        peak_spacing = np.diff(peaks)
        if len(peak_spacing) == 0 or np.all(peak_spacing < 10):
            report = "A imagem NÃO contém obstrução significativa | e NÃO possui múltiplas distribuições de densidade claramente distintas."
        else:
            report = "A imagem contém obstrução significativa | possui múltiplas distribuições de densidade claramente distintas."

        return report
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/XbmImagePlugin.py
DELETED
@@ -1,94 +0,0 @@
#
# The Python Imaging Library.
# $Id$
#
# XBM File handling
#
# History:
# 1995-09-08 fl   Created
# 1996-11-01 fl   Added save support
# 1997-07-07 fl   Made header parser more tolerant
# 1997-07-22 fl   Fixed yet another parser bug
# 2001-02-17 fl   Use 're' instead of 'regex' (Python 2.1) (0.4)
# 2001-05-13 fl   Added hotspot handling (based on code from Bernhard Herzog)
# 2004-02-24 fl   Allow some whitespace before first #define
#
# Copyright (c) 1997-2004 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#

import re

from . import Image, ImageFile

# XBM header
xbm_head = re.compile(
    rb"\s*#define[ \t]+.*_width[ \t]+(?P<width>[0-9]+)[\r\n]+"
    b"#define[ \t]+.*_height[ \t]+(?P<height>[0-9]+)[\r\n]+"
    b"(?P<hotspot>"
    b"#define[ \t]+[^_]*_x_hot[ \t]+(?P<xhot>[0-9]+)[\r\n]+"
    b"#define[ \t]+[^_]*_y_hot[ \t]+(?P<yhot>[0-9]+)[\r\n]+"
    b")?"
    rb"[\000-\377]*_bits\[]"
)


def _accept(prefix):
    return prefix.lstrip()[:7] == b"#define"


##
# Image plugin for X11 bitmaps.


class XbmImageFile(ImageFile.ImageFile):
    format = "XBM"
    format_description = "X11 Bitmap"

    def _open(self):
        m = xbm_head.match(self.fp.read(512))

        if not m:
            msg = "not a XBM file"
            raise SyntaxError(msg)

        xsize = int(m.group("width"))
        ysize = int(m.group("height"))

        if m.group("hotspot"):
            self.info["hotspot"] = (int(m.group("xhot")), int(m.group("yhot")))

        self.mode = "1"
        self._size = xsize, ysize

        self.tile = [("xbm", (0, 0) + self.size, m.end(), None)]


def _save(im, fp, filename):
    if im.mode != "1":
        msg = f"cannot write mode {im.mode} as XBM"
        raise OSError(msg)

    fp.write(f"#define im_width {im.size[0]}\n".encode("ascii"))
    fp.write(f"#define im_height {im.size[1]}\n".encode("ascii"))

    hotspot = im.encoderinfo.get("hotspot")
    if hotspot:
        fp.write(f"#define im_x_hot {hotspot[0]}\n".encode("ascii"))
        fp.write(f"#define im_y_hot {hotspot[1]}\n".encode("ascii"))

    fp.write(b"static char im_bits[] = {\n")

    ImageFile._save(im, fp, [("xbm", (0, 0) + im.size, 0, None)])

    fp.write(b"};\n")


Image.register_open(XbmImageFile.format, XbmImageFile, _accept)
Image.register_save(XbmImageFile.format, _save)

Image.register_extension(XbmImageFile.format, ".xbm")

Image.register_mime(XbmImageFile.format, "image/xbm")
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_v_h_e_a.py
DELETED
@@ -1,126 +0,0 @@
|
|
1 |
-
from fontTools.misc import sstruct
|
2 |
-
from fontTools.misc.textTools import safeEval
|
3 |
-
from fontTools.misc.fixedTools import (
|
4 |
-
ensureVersionIsLong as fi2ve,
|
5 |
-
versionToFixed as ve2fi,
|
6 |
-
)
|
7 |
-
from . import DefaultTable
|
8 |
-
import math
|
9 |
-
|
10 |
-
|
11 |
-
vheaFormat = """
|
12 |
-
> # big endian
|
13 |
-
tableVersion: L
|
14 |
-
ascent: h
|
15 |
-
descent: h
|
16 |
-
lineGap: h
|
17 |
-
advanceHeightMax: H
|
18 |
-
minTopSideBearing: h
|
19 |
-
minBottomSideBearing: h
|
20 |
-
yMaxExtent: h
|
21 |
-
caretSlopeRise: h
|
22 |
-
caretSlopeRun: h
|
23 |
-
caretOffset: h
|
24 |
-
reserved1: h
|
25 |
-
reserved2: h
|
26 |
-
reserved3: h
|
27 |
-
reserved4: h
|
28 |
-
metricDataFormat: h
|
29 |
-
numberOfVMetrics: H
|
30 |
-
"""
|
31 |
-
|
32 |
-
|
33 |
-
class table__v_h_e_a(DefaultTable.DefaultTable):
|
34 |
-
|
35 |
-
# Note: Keep in sync with table__h_h_e_a
|
36 |
-
|
37 |
-
dependencies = ["vmtx", "glyf", "CFF ", "CFF2"]
|
38 |
-
|
39 |
-
def decompile(self, data, ttFont):
|
40 |
-
sstruct.unpack(vheaFormat, data, self)
|
41 |
-
|
42 |
-
def compile(self, ttFont):
|
43 |
-
if ttFont.recalcBBoxes and (
|
44 |
-
ttFont.isLoaded("glyf")
|
45 |
-
or ttFont.isLoaded("CFF ")
|
46 |
-
or ttFont.isLoaded("CFF2")
|
47 |
-
):
|
48 |
-
self.recalc(ttFont)
|
49 |
-
self.tableVersion = fi2ve(self.tableVersion)
|
50 |
-
return sstruct.pack(vheaFormat, self)
|
51 |
-
|
52 |
-
def recalc(self, ttFont):
|
53 |
-
if "vmtx" in ttFont:
|
54 |
-
vmtxTable = ttFont["vmtx"]
|
55 |
-
self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())
|
56 |
-
|
57 |
-
boundsHeightDict = {}
|
58 |
-
if "glyf" in ttFont:
|
59 |
-
glyfTable = ttFont["glyf"]
|
60 |
-
for name in ttFont.getGlyphOrder():
|
61 |
-
g = glyfTable[name]
|
62 |
-
if g.numberOfContours == 0:
|
63 |
-
continue
|
64 |
-
if g.numberOfContours < 0 and not hasattr(g, "yMax"):
|
65 |
-
# Composite glyph without extents set.
|
66 |
-
# Calculate those.
|
67 |
-
g.recalcBounds(glyfTable)
|
68 |
-
boundsHeightDict[name] = g.yMax - g.yMin
|
69 |
-
elif "CFF " in ttFont or "CFF2" in ttFont:
|
70 |
-
if "CFF " in ttFont:
|
71 |
-
topDict = ttFont["CFF "].cff.topDictIndex[0]
|
72 |
-
else:
|
73 |
-
topDict = ttFont["CFF2"].cff.topDictIndex[0]
|
74 |
-
charStrings = topDict.CharStrings
|
75 |
-
for name in ttFont.getGlyphOrder():
|
76 |
-
cs = charStrings[name]
|
77 |
-
bounds = cs.calcBounds(charStrings)
|
78 |
-
if bounds is not None:
|
79 |
-
boundsHeightDict[name] = int(
|
80 |
-
math.ceil(bounds[3]) - math.floor(bounds[1])
|
81 |
-
)
|
82 |
-
|
83 |
-
if boundsHeightDict:
|
84 |
-
minTopSideBearing = float("inf")
|
85 |
-
minBottomSideBearing = float("inf")
|
86 |
-
yMaxExtent = -float("inf")
|
87 |
-
for name, boundsHeight in boundsHeightDict.items():
|
88 |
-
advanceHeight, tsb = vmtxTable[name]
|
89 |
-
bsb = advanceHeight - tsb - boundsHeight
|
90 |
-
extent = tsb + boundsHeight
|
91 |
-
minTopSideBearing = min(minTopSideBearing, tsb)
|
92 |
-
minBottomSideBearing = min(minBottomSideBearing, bsb)
|
93 |
-
yMaxExtent = max(yMaxExtent, extent)
|
94 |
-
self.minTopSideBearing = minTopSideBearing
|
95 |
-
self.minBottomSideBearing = minBottomSideBearing
|
96 |
-
self.yMaxExtent = yMaxExtent
|
97 |
-
|
98 |
-
else: # No glyph has outlines.
|
99 |
-
self.minTopSideBearing = 0
|
100 |
-
self.minBottomSideBearing = 0
|
101 |
-
self.yMaxExtent = 0
|
102 |
-
|
103 |
-
def toXML(self, writer, ttFont):
|
104 |
-
formatstring, names, fixes = sstruct.getformat(vheaFormat)
|
105 |
-
for name in names:
|
106 |
-
value = getattr(self, name)
|
107 |
-
if name == "tableVersion":
|
108 |
-
value = fi2ve(value)
|
109 |
-
value = "0x%08x" % value
|
110 |
-
writer.simpletag(name, value=value)
|
111 |
-
writer.newline()
|
112 |
-
|
113 |
-
def fromXML(self, name, attrs, content, ttFont):
|
114 |
-
if name == "tableVersion":
|
115 |
-
setattr(self, name, ve2fi(attrs["value"]))
|
116 |
-
return
|
117 |
-
setattr(self, name, safeEval(attrs["value"]))
|
118 |
-
|
119 |
-
# reserved0 is caretOffset for legacy reasons
|
120 |
-
@property
|
121 |
-
def reserved0(self):
|
122 |
-
return self.caretOffset
|
123 |
-
|
124 |
-
@reserved0.setter
|
125 |
-
def reserved0(self, value):
|
126 |
-
self.caretOffset = value
|
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/__init__.py
DELETED
File without changes