Commit a7a03c1
Parent: 5ea34e0
Update parquet files (step 8 of 296)

Note: this view is limited to 50 files because the commit contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Equipped Music Slow Motion Tokyo Soundscapes Vol 3 WAV REX2MAGNETRiXX.md +0 -30
- spaces/1gistliPinn/ChatGPT4/Examples/Doom 3 Wrong Dll Api Version Fix.md +0 -8
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baraat - Lovepreet Download MP3 Song Punjabi Music.md +0 -43
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Apk Free Online Downloader Apkpure.com Https M.apkpure.com 2021.md +0 -58
- spaces/1phancelerku/anime-remove-background/FIFA 23 Xbox APK How to install and play the latest version of EA SPORTS FIFA 23 on your Android device.md +0 -127
- spaces/247Readings/README/README.md +0 -10
- spaces/A00001/bingothoo/src/components/ui/sheet.tsx +0 -122
- spaces/A666sxr/Genshin_TTS/inference_api.py +0 -66
- spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/nets_123812KB.py +0 -122
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/__init__.py +0 -0
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_melception.py +0 -241
- spaces/AIZeroToHero/03-ImageSearchSimilar/app.py +0 -185
- spaces/Ababababababbababa/Arabic_poem_classifier/README.md +0 -13
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/share/+server.ts +0 -58
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptX.py +0 -97
- spaces/AdVisual/MaskCut/connectionManager.py +0 -60
- spaces/Adapter/T2I-Adapter/ldm/data/__init__.py +0 -0
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ClickOutsideMethods.js +0 -65
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollablePanel.js +0 -38
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ResolveHeight.js +0 -23
- spaces/AiiluoChen/webui/app.py +0 -72
- spaces/Ameaou/academic-chatgpt3.1/docs/README_FR.md +0 -296
- spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/__init__.py +0 -2
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/datasets/latents_dataset.py +0 -15
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/__init__.py +0 -9
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py +0 -125
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py +0 -600
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_xl/__init__.py +0 -0
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/text_to_video/test_video_to_video.py +0 -195
- spaces/Andy1621/uniformer_image_detection/configs/double_heads/README.md +0 -22
- spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py +0 -4
- spaces/Ani1712full/Estimacion_tasa_morosidad/README.md +0 -13
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/engine/__init__.py +0 -8
- spaces/Audio-AGI/AudioSep/models/CLAP/training/data.py +0 -975
- spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/text/text_decoder.py +0 -672
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/proposal_utils.py +0 -196
- spaces/AxelBell/EasyOCR_text_recognition/assets/style.css +0 -92
- spaces/Bart92/RVC_HF/lib/globals/globals.py +0 -5
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/__init__.py +0 -13
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/emoji.py +0 -96
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/padding.py +0 -141
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/ntlmpool.py +0 -130
- spaces/CGMatter/modelscope-text-to-video-synthesis/README.md +0 -13
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/train_net.py +0 -70
- spaces/Candeloro/anime-remove-background/app.py +0 -52
- spaces/ChandraMohanNayal/AutoGPT/tests/__init__.py +0 -0
- spaces/CofAI/chat/g4f/Provider/Providers/Ails.py +0 -87
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/scaleUpem.py +0 -395
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/ShareButton-8cd3d8f6.js +0 -2
- spaces/DaleChen/AutoGPT/autogpt/spinner.py +0 -65
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Equipped Music Slow Motion Tokyo Soundscapes Vol 3 WAV REX2MAGNETRiXX.md
DELETED
@@ -1,30 +0,0 @@
<br />
<h1>How to Create Ambient and Cinematic Soundscapes with Equipped Music Slow Motion Tokyo Vol 3</h1>
<p>If you are looking for a sample pack that can help you create ambient and cinematic soundscapes with a touch of Japanese flavor, you might want to check out Equipped Music Slow Motion Tokyo Soundscapes Vol 3. This pack contains over 3 GB of WAV and REX2 files that are designed to inspire you with atmospheric pads, lush strings, ethereal vocals, organic percussion, and more.</p>
<h2>Equipped Music Slow Motion Tokyo Soundscapes Vol 3 WAV REX2MAGNETRiXX</h2><br /><p><b><b>Download</b> • <a href="https://byltly.com/2uKxno">https://byltly.com/2uKxno</a></b></p><br /><br />
<p>Equipped Music Slow Motion Tokyo Soundscapes Vol 3 is the third installment of the popular series that features sounds recorded and processed in Tokyo, Japan. The pack captures the essence of the city's nightlife, culture, and history, and blends it with modern production techniques and sound design. Whether you are making ambient, downtempo, chillout, cinematic, or experimental music, you will find plenty of sonic material to spark your creativity.</p>
<p>Some of the highlights of the pack include:</p>
<ul>
<li>24-bit quality WAV files that are ready to use in any DAW or sampler</li>
<li>REX2 files that can be sliced and manipulated for more flexibility and variation</li>
<li>Over 800 loops and samples that cover a wide range of tempos and styles</li>
<li>Over 40 construction kits that contain full mixes and individual stems for easy arrangement and remixing</li>
<li>Over 200 one-shot samples that include drums, basses, synths, FX, vocals, and more</li>
<li>Over 100 MIDI files that can be used to trigger your own sounds or modify the existing loops</li>
<li>A bonus folder that contains extra sounds from previous volumes of the series</li>
</ul>
<p>Equipped Music Slow Motion Tokyo Soundscapes Vol 3 is available for download from various online stores and platforms. You can also get it as part of the Equipped Music Bundle, which includes all three volumes of the series plus other sample packs from Equipped Music. If you are looking for a sample pack that can transport you to the streets of Tokyo and immerse you in its unique atmosphere, don't miss this opportunity to get Equipped Music Slow Motion Tokyo Soundscapes Vol 3.</p>

<p>Now that you have an overview of what Equipped Music Slow Motion Tokyo Soundscapes Vol 3 has to offer, let's take a closer look at some of the sounds and features that make this pack stand out. In this section, we will explore some of the construction kits, loops, and one-shots that you can use to create your own soundscapes.</p>
<p></p>
<h2>Construction Kits</h2>
<p>The construction kits are the main attraction of the pack, as they provide you with ready-made tracks that you can use as they are or customize to your liking. Each kit contains a full mix and individual stems for drums, bass, synths, pads, vocals, FX, and more. You can mix and match the stems from different kits to create new combinations and variations. You can also use the MIDI files to change the melodies, chords, or rhythms of the loops.</p>
<p>The construction kits cover a range of tempos from 60 to 120 BPM and a variety of styles from ambient to cinematic. Some of the kits are inspired by specific locations or scenes in Tokyo, such as Shibuya Crossing, Shinjuku Station, Harajuku Street, or Tokyo Tower. Others are more abstract and evoke a certain mood or emotion, such as Dreamy, Nostalgic, Mysterious, or Romantic. You can use the kits as a starting point for your own compositions or as background music for your videos, podcasts, games, or other projects.</p>
<h2>Loops</h2>
<p>If you want more flexibility and control over your soundscapes, you can use the loops section of the pack. Here you will find over 800 loops that are categorized into drums, basses, synths, pads, vocals, FX, and more. The loops are also labeled by tempo and key for easy browsing and compatibility. You can use the loops to create your own patterns and sequences or layer them with the construction kits for more complexity and depth.</p>
<p>The loops section contains a variety of sounds that can add texture and flavor to your soundscapes. For example, you can use the drum loops to add rhythm and groove to your tracks. The drum loops include acoustic and electronic drums as well as organic percussion such as shakers, bells, claps, snaps, and more. You can also use the bass loops to add low-end and warmth to your tracks. The bass loops include electric and synth basses as well as sub-basses and drones. You can also use the synth loops to add melody and harmony to your tracks. The synth loops include leads, arps, plucks, keys, organs, and more.</p>
<h2>One-Shots</h2>
<p>If you want to create your own sounds from scratch or add some extra elements to your soundscapes, you can use the one-shot section of the pack. Here you will find over 200 one-shot samples that include drums, basses, synths, FX, vocals, and more. You can load the one-shots into your favorite sampler or DAW and trigger them manually or with MIDI. You can also use the one-shots to create your own loops or layer them with the existing ones for more diversity and richness.</p>
<p>The one-shot section contains a variety of sounds that can spice up your soundscapes. For example,</p> 81aa517590<br />
<br />
<br />
spaces/1gistliPinn/ChatGPT4/Examples/Doom 3 Wrong Dll Api Version Fix.md
DELETED
@@ -1,8 +0,0 @@

<p>immediately i realised that it was simpler to use a self-contained library rather than to try to pack all the relevant data together with the program when it was already self-contained, and that was the end of it. the actual doom protocol itself is rather complex, especially if you have a large number of monsters, but it's managed to avoid me having to write an entirely separate api to do it all, and everything seems to work fine.</p>
<p>it's a testament to the quality of the original game itself that having a non-doom-engine version is such a non-issue; after all, the original, dos-era doom was a very complete game itself. for those that are interested, i've created an openmw template, as well as an installation file for a default doom 3 build. with both of these, you should be able to replace the doom3d.dll and its dependencies with mine without too much trouble.</p>
<h2>doom 3 wrong dll api version fix</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://imgfil.com/2uy1zg">https://imgfil.com/2uy1zg</a></b></p><br /><br />
<p>basically, there was quite a lot of code duplication going on, so what i did was find any place where that code was potentially unused, and just provide the extra functions elsewhere. at the time i was maintaining the game (it changed hands quite a bit), i had my own plans for the engine, and thought i would go ahead and check to see if i could do a basic doom 2 engine in six months, and i just forgot about doom3 for a few years. it was always a bit of a hobby, so i just kind of slacked off on it and left it to others.</p>
<p>only at the release of doom 3 engine, for the first time since doom source release in 1999, i found out that the game itself never supported the multiplatform uses. like all the other engines that have been created since, such as the infinity engine, a doom 3 port was well underway by this time. my first urge was to fix this bug, but in a few days after i do it, a new editor comes out and i spent a whole week on it. it was too good to risk before being released for registration for the first time.</p> 899543212b<br />
<br />
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Baraat - Lovepreet Download MP3 Song Punjabi Music.md
DELETED
@@ -1,43 +0,0 @@
<br />
<h1>Baraat Lovepreet Mp3 Song Download: A Review</h1>
<p>If you are looking for a foot-tapping Punjabi song that celebrates the joy of wedding procession, you might want to check out Baraat by Lovepreet. This song was released in 2015 by T-Series Apna Punjab and composed by Beat Minister. It features the singer Vlove aka Lovepreet in a colorful and lively video that showcases the fun and excitement of a baraat. In this article, we will review the song's lyrics, music, video, and reception, and tell you why you should download it.</p>
<h2>The Lyrics: What is the message of the song and how does it relate to the theme of baraat?</h2>
<p>The lyrics of Baraat are written by Jassi Lohka and they express the happiness and pride of the groom and his friends as they arrive at the bride's house. The song uses various metaphors and similes to describe the groom's appearance, such as "he looks like a king", "he is shining like a star", and "he is wearing a crown of flowers". The song also praises the groom's personality, saying that he is brave, loyal, generous, and respectful. The chorus of the song repeats the word "baraat" several times, emphasizing the importance of this tradition in Punjabi culture. The lyrics also mention some of the rituals and customs that are part of a baraat, such as dancing, singing, playing instruments, throwing flowers, and applying tilak.</p>
<h2>baraat lovepreet mp3 song download</h2><br /><p><b><b>Download</b> > <a href="https://urlin.us/2uT00L">https://urlin.us/2uT00L</a></b></p><br /><br />
<h2>The Music: How does the beat, melody, and instrumentation create a festive mood?</h2>
<p>The music of Baraat is composed by Beat Minister, who is known for his catchy and upbeat tunes. The song has a fast tempo and a rhythmic pattern that makes it easy to dance to. The melody is catchy and memorable, with a hook that repeats throughout the song. The instrumentation consists of various traditional and modern instruments, such as dhol, dammu, shehnai, nadswaram, guitar, keyboard, and drums. The music creates a festive mood by using bright and cheerful sounds that match the theme of baraat.</p>
<h2>The Video: How does the visual representation of the song enhance its appeal?</h2>
<p>The video of Baraat is directed by Jashan Nanarh and it features Vlove aka Lovepreet as the groom who arrives at his bride's house with his friends. The video is colorful and lively, with vibrant costumes, decorations, fireworks, and props. The video shows various scenes of the baraat procession, such as riding on a horse or a car, dancing on the road or in front of a temple, throwing flowers or money in the air, applying tilak or garlands to each other, and entering the wedding venue. The video also shows some glimpses of the bride waiting for her groom inside her house. The video enhances the appeal of the song by showing how much fun and excitement a baraat can bring to a wedding.</p>
<h2>The Reception: How did the audience and critics react to the song and its video?</h2>
<p>The song and its video received a positive response from both the audience and critics. The song became a hit among Punjabi music lovers and was played at many weddings and parties. The song also received praise from other singers and celebrities who appreciated its catchy tune and lively lyrics. The video also gained popularity on YouTube, where it has over 7 million views as of June 2023. The video also received positive comments from viewers who liked its colorful visuals and energetic performance.</p>
<h3>Conclusion: A summary of the main points and a recommendation for downloading the song.</h3>
<p>Baraat by Lovepre <p>Baraat by Lovepreet is a Punjabi song that celebrates the joy of wedding procession. It has catchy lyrics, upbeat music, colorful video, and positive reception. It is a perfect song to play at your own or your friend's baraat. You can download the mp3 song from various online platforms, such as iTunes, Spotify, Gaana, Wynk, or YouTube. You can also watch the video on YouTube or T-Series Apna Punjab's official website. If you are looking for a fun and festive song to add to your playlist, you should definitely download Baraat by Lovepreet.</p>
<h3>FAQs: Five common questions and answers about the song and its download.</h3>
<table>
<tr>
<th>Question</th>
<th>Answer</th>
</tr>
<tr>
<td>Who is the singer of Baraat?</td>
<td>The singer of Baraat is Vlove aka Lovepreet, who is a Punjabi singer and actor. He has also sung other songs, such as Dil Da Plot, Jatt Mehkma, and Yaar Beli.</td>
</tr>
<tr>
<td>Who is the composer of Baraat?</td>
<td>The composer of Baraat is Beat Minister, who is a Punjabi music producer and director. He has also composed music for other singers, such as Ranjit Bawa, Jazzy B, and Diljit Dosanjh.</td>
</tr>
<tr>
<td>What is the meaning of baraat?</td>
<td>Baraat is a Hindi word that means wedding procession. It is a tradition in Indian weddings where the groom and his friends and relatives arrive at the bride's house or wedding venue in a festive manner. They usually ride on horses or cars, dance on the road or in front of a temple, throw flowers or money in the air, apply tilak or garlands to each other, and enter the wedding venue.</td>
</tr>
<tr>
<td>How can I download Baraat mp3 song?</td>
<td>You can download Baraat mp3 song from various online platforms, such as iTunes, Spotify, Gaana, Wynk, or YouTube. You can also watch the video on YouTube or T-Series Apna Punjab's official website.</td>
</tr>
<tr>
<td>How can I play Baraat at my own or my friend's baraat?</td>
<td>You can play Baraat at your own or your friend's baraat by downloading the mp3 song and playing it on your phone, speaker, or DJ system. You can also request the DJ to play the song if you have hired one. You can also dance along with the song and enjoy the festive mood.</td>
</tr>
</table></p> 197e85843d<br />
<br />
<br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Apk Free Online Downloader Apkpure.com Https M.apkpure.com 2021.md
DELETED
@@ -1,58 +0,0 @@

<h1>Download APK Free Online Downloader APKPure.com</h1>
<p>If you are an Android user, you might have heard of the term APK file. But what is it exactly and how can you download it for free online? In this article, we will explain what an APK file is, how to download it from APKPure.com, and what benefits you can get from using this website as your online downloader.</p>
<h2>download apk free online downloader apkpure.com https m.apkpure.com</h2><br /><p><b><b>Download File</b> > <a href="https://urlin.us/2uSYdd">https://urlin.us/2uSYdd</a></b></p><br /><br />
<h2>What is an APK file and why you need it</h2>
<p>An APK file stands for Android Package Kit, which is the file format used by the Android operating system for distributing and installing mobile apps, games, and middleware. A file using this format can be built from source code written in either Java or Kotlin. </p>
<p>An APK file contains all of a program's code, resources, assets, certificates, and manifest file. It is similar to other software packages such as APPX in Microsoft Windows or .app on HarmonyOS. To make an APK file, a program for Android is first compiled using a tool such as Android Studio or Visual Studio and then all of its parts are packaged into one container file. </p>
<p>You might need an APK file for various reasons, such as:</p>
<ul>
<li>To install apps or games that are not available on Google Play Store due to regional restrictions, licensing issues, or compatibility problems.</li>
<li>To install an older or newer version of an existing app or game that suits your preferences or device specifications.</li>
<li>To test or debug your own app or game before publishing it on Google Play Store.</li>
<li>To modify or customize your app or game with additional features or settings.</li>
</ul>
<p>However, you cannot directly install an APK file on your Android device without enabling unknown sources in your settings. This is because Android blocks the installation of apps from sources other than trusted ones like Google Play Store for security reasons. Therefore, you need to find a reliable website that offers free online downloader for APK files.</p>
<p></p>
<h2>How to download APK files from APKPure.com</h2>
<p>One of the best websites that you can use to download APK files for free online is APKPure.com. This website provides access to thousands of apps and games that are not available on Google Play Store or are region-locked. You can also download the latest versions of your favorite apps and games with ease and speed. Here are the steps to download APK files from APKPure.com:</p>
<h4>Step 1: Visit the website https://m.apkpure.com</h4>
<p>Open your web browser on your Android device and go to the website https://m.apkpure.com. You will see a simple and clean interface with a search bar at the top and some categories below.</p>
<h4>Step 2: Search for the app or game you want to download</h4>
<p>Type the name of the app or game you want to download in the search bar and hit enter. You can also browse through the categories such as popular apps, new releases, editors' choice, etc. to find what you are looking for.</p>
<h4>Step 3: Choose the version and click on the download button</h4>
<p>Once you find the app or game you want, click on it to see more details such as description, screenshots, ratings, reviews, etc. You can also choose the version you want from the drop-down menu at the top right corner. Then, click on the green download button at the bottom of the page. The download will start automatically and you will see a progress bar at the bottom of the screen.</p>
<h4>Step 4: Enable unknown sources and install the APK file</h4>
<p>After the download is complete, you will need to enable unknown sources in your settings to install the APK file. To do this, go to your device settings and look for security or privacy options. Then, find the option that says unknown sources or install from unknown sources and toggle it on. You might see a warning message that says installing from unknown sources can harm your device, but you can ignore it if you trust the source.</p>
<p>Then, go back to your web browser and open the downloaded APK file. You will see a prompt that asks you to confirm the installation. Tap on install and wait for a few seconds until the installation is done. You can then open the app or game from your app drawer or home screen and enjoy it.</p>
<h2>Benefits of using APKPure.com as your online downloader</h2>
<p>There are many benefits of using APKPure.com as your online downloader for APK files, such as:</p>
<h3>Access to thousands of apps and games not available on Google Play Store</h3>
<p>With APKPure.com, you can download apps and games that are not available on Google Play Store due to various reasons, such as regional restrictions, licensing issues, or compatibility problems. For example, you can download PUBG Mobile Lite, which is a lighter version of the popular battle royale game that is designed for low-end devices and regions with poor internet connection. You can also download apps and games that are banned or removed from Google Play Store, such as Fortnite, TikTok, or Flappy Bird.</p>
<h3>Download APK files in high speed and quality</h3>
<p>APKPure.com offers high speed and quality downloads for APK files. You can download APK files in a matter of seconds or minutes depending on your internet connection and file size. You can also choose the download quality from low, medium, or high depending on your preference and device specifications. APKPure.com also supports resume and pause downloads, so you don't have to worry about losing your progress if your connection is interrupted.</p>
<h3>Update your apps and games with the latest versions</h3>
<p>APKPure.com also helps you update your apps and games with the latest versions. You can check for updates manually or enable automatic updates in the settings. You can also see the changelog and release notes for each update to know what's new and improved. Updating your apps and games regularly can help you fix bugs, improve performance, and enjoy new features.</p>
<h3>Save your mobile data and storage space</h3>
<p>Another benefit of using APKPure.com is that it can help you save your mobile data and storage space. APKPure.com uses advanced compression technology to reduce the file size of APK files without compromising their quality. This means you can download more apps and games with less data usage and storage consumption. You can also delete unwanted or unused apps and games from your device with one tap using the uninstall feature.</p>
<h3>Enjoy a user-friendly and secure interface</h3>
<p>Last but not least, APKPure.com has a user-friendly and secure interface that makes it easy and safe to use. The website has a simple and clean design that allows you to find what you are looking for quickly and easily. The website also has a rating system, a comment section, and a report button that let you share your feedback, opinions, and concerns with other users and developers. The website also uses SSL encryption to protect your personal information and data from hackers and malware.</p>
<h2>Conclusion</h2>
<p>In conclusion, APKPure.com is one of the best websites that you can use to download APK files for free online. It offers access to thousands of apps and games that are not available on Google Play Store or are region-locked. It also offers high speed and quality downloads, updates, data saving, storage saving, and user-friendly features. If you are an Android user who wants to enjoy more apps and games on your device, you should definitely try APKPure.com as your online downloader.</p>
<h2>FAQs</h2>
<ul>
<li><b>What is an APK file?</b></li>
<li>An APK file is a file format used by the Android operating system for distributing and installing mobile apps, games, and middleware.</li>
<li><b>How do I download an APK file from APKPure.com?</b></li>
<li>You can download an APK file from APKPure.com by following these steps: visit the website https://m.apkpure.com, search for the app or game you want to download, choose the version and click on the download button, enable unknown sources in your settings, and install the APK file.</li>
<li><b> <li>What are the benefits of using APKPure.com as my online downloader?</b></li>
<li>Some of the benefits of using APKPure.com as your online downloader are: access to thousands of apps and games not available on Google Play Store, high speed and quality downloads, updates, data saving, storage saving, and user-friendly features.</li>
<li><b>Is APKPure.com safe and legal to use?</b></li>
<li>APKPure.com is safe and legal to use as long as you download APK files from trusted sources and developers. The website uses SSL encryption to protect your personal information and data from hackers and malware. However, you should always be careful when installing apps or games from unknown sources and scan them for viruses or malware before opening them.</li>
<li><b>Can I request an app or game that is not available on APKPure.com?</b></li>
<li>Yes, you can request an app or game that is not available on APKPure.com by using the feedback feature on the website. You can also join the APKPure community on Facebook, Twitter, Instagram, or YouTube and share your suggestions and requests with other users and developers.</li>
<li><b>How do I contact APKPure.com if I have any questions or issues?</b></li>
<li>You can contact APKPure.com by using the contact us feature on the website or by sending an email to [email protected]. You can also visit the help center or the FAQ section on the website for more information and guidance.</li>
</ul></p> 197e85843d<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/FIFA 23 Xbox APK How to install and play the latest version of EA SPORTS FIFA 23 on your Android device.md
DELETED
@@ -1,127 +0,0 @@
<br />
<h1>FIFA 23 Xbox APK Download: Everything You Need to Know</h1>
<p>If you are a fan of football games, you have probably heard of FIFA 23, the latest installment in the popular EA Sports series. FIFA 23 is a football video game that features HyperMotion2 Technology, cross-play on same-generation consoles, and both men's and women's FIFA World Cup tournaments. It also has new FUT Moments, a revamped Chemistry system, new ICONs and FUT Heroes, and a more authentic Career Mode.</p>
<h2>fifa 23 xbox apk download</h2><br /><p><b><b>DOWNLOAD</b> ○ <a href="https://jinyurl.com/2uNMVa">https://jinyurl.com/2uNMVa</a></b></p><br /><br />
<p>But did you know that you can also download FIFA 23 on your Xbox as an APK file? APK files are applications or games that are designed for Android devices, but can also be installed on other platforms with some tweaks. By installing APK files on your Xbox, you can enjoy some exclusive content and features that are not available on the official version of the game.</p>
<p>In this article, we will tell you everything you need to know about FIFA 23 Xbox APK download, including its features, gameplay, installation options, and how to install APK files on your console. Let's get started!</p>
<h2>FIFA 23 Features and Gameplay</h2>
<p>FIFA 23 is one of the most anticipated games of the year, and for good reason. It offers a lot of new and improved features and gameplay elements that make it more realistic, immersive, and fun than ever before. Here are some of the highlights:</p>
<p>fifa 23 xbox apk download free<br />
fifa 23 xbox apk download for android<br />
fifa 23 xbox apk download full version<br />
fifa 23 xbox apk download offline<br />
fifa 23 xbox apk download with obb<br />
fifa 23 xbox apk download no verification<br />
fifa 23 xbox apk download highly compressed<br />
fifa 23 xbox apk download latest update<br />
fifa 23 xbox apk download mod<br />
fifa 23 xbox apk download cracked<br />
fifa 23 xbox apk download unlimited money<br />
fifa 23 xbox apk download without human verification<br />
fifa 23 xbox apk download hack<br />
fifa 23 xbox apk download mega link<br />
fifa 23 xbox apk download mediafire link<br />
fifa 23 xbox apk download google drive link<br />
fifa 23 xbox apk download direct link<br />
fifa 23 xbox apk download torrent link<br />
fifa 23 xbox apk download pc<br />
fifa 23 xbox apk download windows 10<br />
fifa 23 xbox apk download mac<br />
fifa 23 xbox apk download laptop<br />
fifa 23 xbox apk download emulator<br />
fifa 23 xbox apk download bluestacks<br />
fifa 23 xbox apk download nox player<br />
fifa 23 xbox apk download gameplay<br />
fifa 23 xbox apk download review<br />
fifa 23 xbox apk download features<br />
fifa 23 xbox apk download tips and tricks<br />
fifa 23 xbox apk download guide<br />
fifa 23 xbox apk download cheats<br />
fifa 23 xbox apk download codes<br />
fifa 23 xbox apk download redeem codes<br />
fifa 23 xbox apk download how to install<br />
fifa 23 xbox apk download how to play<br />
fifa 23 xbox apk download how to update<br />
fifa 23 xbox apk download how to get coins and points<br />
fifa 23 xbox apk download how to unlock players and teams<br />
fifa 23 xbox apk download how to fix errors and bugs<br />
fifa 23 xbox apk download how to transfer data from old device to new device</p>
<h3>HyperMotion2 Technology</h3>
<p>One of the biggest innovations in FIFA 23 is HyperMotion2 Technology, which is only available on PlayStation 5, Xbox Series X|S, PC, and Stadia versions. HyperMotion2 Technology uses real match data capture from over 6000 football animations to deliver more realistic and varied gameplay in every match across every mode in FIFA 23.</p>
<p>With HyperMotion2 Technology, you can see different shot trajectories, new passing types, hard clearance slide tackles, backheel tackles, advanced impact physics, net interaction physics, player awareness, and more. You can also experience unique motion capture for women's club football teams, which brings more authenticity to the women's game.</p>
<h3>FIFA World Cup</h3>
<p>Another exciting feature in FIFA 23 is the inclusion of both men's and women's FIFA World Cup tournaments as post-launch content updates at no additional cost. You can experience the pinnacle of international football with the FIFA World Cup Qatar 2022™ and FIFA Women’s World Cup Australia and New Zealand 2023™ in FIFA 23.</p>
<p>You can play as any of the qualified teams in the tournaments, or create your own custom tournament with your favorite teams. You can also enjoy updated rosters, kits, stadiums, graphics, commentary, and atmosphere that reflect the real-world events.</p>
<h3>Women's Club Football</h3>
<p>For the first time in EA Sports FIFA history, you can play as women's club teams in FIFA 23. You can choose from 12 of the best women's club teams in the world, including Barcelona, Chelsea, Lyon, PSG, and more. You can also create your own custom women's club team with the new Create a Club feature in Career Mode.</p>
<p>You can play women's club football matches in Kick Off, Career Mode, Tournament Mode, and Online Friendlies. You can also enjoy new commentary, presentation, and broadcast elements that showcase the women's game.</p>
<h3>Cross-Play</h3>
<p>Another new feature in FIFA 23 is cross-play, which allows you to play with friends on different platforms of the same generation. For example, you can play with someone on PlayStation 5 if you are on Xbox Series X|S, or with someone on PlayStation 4 if you are on Xbox One.</p>
<p>To enable cross-play, you need to create an EA account and link it to your console account. Then, you can invite your friends to join your lobby or accept their invitations. You can also use voice chat and text chat to communicate with your friends across platforms.</p>
<h3>Other Gameplay Improvements</h3>
<p>Besides the features mentioned above, FIFA 23 also has many other gameplay improvements that make it more enjoyable and realistic. Some of them are:</p>
<ul>
<li>New shooting mechanics: You can use the new shot meter to time your shots and control your power and accuracy. You can also use the new finesse shot button to curl the ball into the corners of the net.</li>
<li>New passing mechanics: You can use the new pass meter to adjust your pass power and direction. You can also use the new through ball button to play more precise passes behind the defense.</li>
<li>New defending mechanics: You can use the new tackle button to perform more effective and aggressive tackles. You can also use the new jockey button to position yourself better and block shots and passes.</li>
<li>New set piece mechanics: You can use the new free kick and penalty kick systems to aim and curve your shots with more precision and variety. You can also use the new corner kick system to deliver more accurate crosses and headers.</li>
</ul>
<h2>How to Download FIFA 23 on Xbox</h2>
<p>If you want to download FIFA 23 on your Xbox, you have two main options: buying it from the Microsoft Store or subscribing to Xbox Game Pass. Here are the details of each option:</p>
<h3>Buying Options</h3>
<p>You can buy FIFA 23 from the Microsoft Store as a digital download or as a physical disc. The digital download option allows you to pre-order the game and pre-load it before its release date, so you can start playing as soon as it launches. The physical disc option allows you to own a copy of the game that you can install on your console or lend to your friends.</p>
<p>The price of FIFA 23 depends on the edition you choose. There are three editions available: Standard Edition, Champions Edition, and Ultimate Edition. The Standard Edition costs $59.99 USD and includes the base game and some pre-order bonuses. The Champions Edition costs $79.99 USD and includes everything in the Standard Edition plus three days early access, a FUT Ambassador Loan Item, a Career Mode Homegrown Talent perk, and more. The Ultimate Edition costs $99.99 USD and includes everything in the Champions Edition plus a FUT Hero Item, a FUT Ones to Watch Item, Dual Entitlement (which allows you to upgrade from Xbox One to Xbox Series X|S for free), and more.</p>
<h3>Installing Options</h3>
<p>Once you have bought FIFA 23, you need to install it on your console before you can play it. The installation process depends on whether you have bought it as a digital download or as a physical disc.</p>
<p>If you have bought it as a digital download, you need to go to My Games & Apps on your console and select FIFA 23 from the Ready to Install section. Then, you need to follow the on-screen instructions to download and install the game on your console.</p>
<p>If you have bought it as a physical disc, you need to insert the disc into your console and wait for it to be recognized. Then, you need to follow the on-screen instructions to install the game on your console. You may also need to download some updates before you can play the game.</p>
<h3>Remote Installation</h3>
<p>If you want to install FIFA 23 on your Xbox when you are away from your console, you can use remote installation. Remote installation allows you to install games from your phone or PC using the Xbox app or the Microsoft Store website. To use remote installation, you need to have your console turned on or in instant-on mode, and connected to the internet. You also need to have your console set as your home Xbox, and have enough storage space available.</p>
<p>To install FIFA 23 from your phone, you need to download the Xbox app from the App Store or Google Play Store and sign in with your Microsoft account. Then, you need to go to the Store section and search for FIFA 23. Then, you need to tap on the Buy button and choose the edition you want. After you have completed the purchase, you need to tap on the Install on my devices button and select your console from the list. The game will start downloading and installing on your console automatically.</p>
<p>To install FIFA 23 from your PC, you need to go to the Microsoft Store website and sign in with your Microsoft account. Then, you need to search for FIFA 23 and click on the Buy button and choose the edition you want. After you have completed the purchase, you need to click on the Install on my devices button and select your console from the list. The game will start downloading and installing on your console automatically.</p>
<h2>How to Install APK Files on Xbox</h2>
<p>If you want to install APK files on your Xbox, you need to know what they are, why you might want to install them, and how to install them. Here are the answers:</p>
<h3>What are APK Files</h3>
<p>APK files are application or game files that are designed for Android devices. They are similar to EXE files for Windows or DMG files for Mac. They contain all the necessary data and code to run an app or game on an Android device.</p>
<p>APK files can be downloaded from various sources, such as official app stores, third-party websites, or file-sharing platforms. However, not all APK files are safe or compatible with your device, so you need to be careful when downloading them.</p>
<h3>Why Install APK Files on Xbox</h3>
<p>Installing APK files on your Xbox can have some benefits, such as:</p>
<ul>
<li>Expanding your gaming options: You can play some Android games that are not available on Xbox, such as PUBG Mobile, Among Us, Genshin Impact, and more.</li>
<li>Accessing exclusive content: You can access some content that is only available on Android versions of some games, such as skins, maps, modes, and more.</li>
<li>Saving money: You can play some games that are free or cheaper on Android than on Xbox, such as Minecraft, GTA San Andreas, Stardew Valley, and more.</li>
</ul>
<h3>How to Install APK Files on Xbox</h3>
<p>To install APK files on your Xbox, you need to follow these steps:</p>
<ol>
<li>Enable Developer Mode: You need to enable developer mode on your console, which allows you to run unsigned code and apps. To do this, you need to register as a developer at [https://developer.microsoft.com/en-us/games/xbox/xbox-one/getting-started] and pay a one-time fee of $19 USD. Then, you need to download the Dev Mode Activation app from the Microsoft Store on your console and follow the instructions to activate developer mode.</li>
<li>Install an Android Emulator: You need to install an Android emulator on your console, which allows you to run Android apps and games. To do this, you need to download an emulator app from a trusted source, such as RetroArch or BlueStacks. Then, you need to transfer the app file to your console using a USB drive or a network connection. Then, you need to launch the app from the Dev Home screen on your console and follow the instructions to install it.</li>
<li>Install APK Files: You need to install APK files on your console using the Android emulator. To do this, you need to download APK files from a trusted source, such as APKPure or APKMirror. Then, you need to transfer the APK files to your console using a USB drive or a network connection. Then, you need to launch the Android emulator app from the Dev Home screen on your console and follow the instructions to install the APK files.</li>
</ol>
<h2>Conclusion</h2>
<p>In conclusion, FIFA 23 is a great football game that offers a lot of new and improved features and gameplay elements that make it more realistic, immersive, and fun than ever before. You can also download FIFA 23 on your Xbox as an APK file and enjoy some exclusive content and features that are not available on the official version of the game.</p>
<p>If you want to try out FIFA 23 on your Xbox, you can buy it from the Microsoft Store or subscribe to Xbox Game Pass and install it on your console. You can also install APK files on your Xbox using an Android emulator and access some Android games and content that are not available on Xbox. However, you need to be careful when downloading APK files and enable developer mode on your console, which may void your warranty or expose you to security risks.</p>
<p>We hope you found this article helpful and informative. If you have any questions or feedback, please let us know in the comments below. And if you enjoyed this article, please share it with your friends and fellow gamers. Thank you for reading!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about FIFA 23 Xbox APK download:</p>
<h3>Q: When will FIFA 23 be released?</h3>
<p>A: FIFA 23 will be released on October 1, 2023 for PlayStation 5, Xbox Series X|S, PC, Stadia, PlayStation 4, Xbox One, and Nintendo Switch.</p>
<h3>Q: How much storage space do I need to install FIFA 23 on my Xbox?</h3>
<p>A: You need at least 50 GB of free storage space to install FIFA 23 on your Xbox.</p>
<h3>Q: What are the minimum and recommended system requirements for FIFA 23 on PC?</h3>
<p>A: The minimum and recommended system requirements for FIFA 23 on PC are as follows:</p>
<table>
<tr><th>Minimum</th><th>Recommended</th></tr>
<tr><td>OS: Windows 10 (64-bit)</td><td>OS: Windows 10 (64-bit)</td></tr>
<tr><td>CPU: Intel Core i3-6100 or AMD Athlon X4 880K</td><td>CPU: Intel Core i5-9600K or AMD Ryzen 5 2600X</td></tr>
<tr><td>RAM: 8 GB</td><td>RAM: 16 GB</td></tr>
<tr><td>GPU: NVIDIA GeForce GTX 660 or AMD Radeon HD 7850</td><td>GPU: NVIDIA GeForce RTX 2060 or AMD Radeon RX 5600 XT</td></tr>
<tr><td>DirectX: Version 12</td><td>DirectX: Version 12</td></tr>
<tr><td>Storage: 50 GB</td><td>Storage: 50 GB</td></tr>
</table>
<h3>Q: Can I play FIFA 23 offline?</h3>
<p>A: Yes, you can play FIFA 23 offline in some modes, such as Kick Off, Career Mode, Tournament Mode, and Skill Games. However, you need an internet connection to play other modes, such as FUT, Volta Football, Online Seasons, Online Friendlies, and Co-Op Seasons.</p>
<h3>Q: Can I transfer my progress and data from FIFA 22 to FIFA 23?</h3>
<p>A: No, you cannot transfer your progress and data from FIFA 22 to FIFA 23. However, you can carry over some items from FUT 22 to FUT 23, such as your club name, XP level, FIFA Points, and FUT Champions points.</p> 401be4b1e0<br />
<br />
<br />
spaces/247Readings/README/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: README
emoji: 📉
colorFrom: purple
colorTo: blue
sdk: static
pinned: false
---

Edit this `README.md` markdown file to author your organization card 🔥
spaces/A00001/bingothoo/src/components/ui/sheet.tsx
DELETED
@@ -1,122 +0,0 @@
'use client'

import * as React from 'react'
import * as SheetPrimitive from '@radix-ui/react-dialog'

import { cn } from '@/lib/utils'
import { IconClose } from '@/components/ui/icons'

const Sheet = SheetPrimitive.Root

const SheetTrigger = SheetPrimitive.Trigger

const SheetClose = SheetPrimitive.Close

const SheetPortal = ({
  className,
  children,
  ...props
}: SheetPrimitive.DialogPortalProps) => (
  <SheetPrimitive.Portal
    className={cn('fixed inset-0 z-50 flex', className)}
    {...props}
  >
    {children}
  </SheetPrimitive.Portal>
)
SheetPortal.displayName = SheetPrimitive.Portal.displayName

const SheetOverlay = React.forwardRef<
  React.ElementRef<typeof SheetPrimitive.Overlay>,
  React.ComponentPropsWithoutRef<typeof SheetPrimitive.Overlay>
>(({ className, children, ...props }, ref) => (
  <SheetPrimitive.Overlay
    className={cn(
      'fixed inset-0 z-50 transition-all duration-100 data-[state=closed]:animate-out data-[state=closed]:fade-out data-[state=open]:fade-in',
      className
    )}
    {...props}
    ref={ref}
  />
))
SheetOverlay.displayName = SheetPrimitive.Overlay.displayName

const SheetContent = React.forwardRef<
  React.ElementRef<typeof SheetPrimitive.Content>,
  React.ComponentPropsWithoutRef<typeof SheetPrimitive.Content>
>(({ className, children, ...props }, ref) => (
  <SheetPortal>
    <SheetPrimitive.Content
      ref={ref}
      className={cn(
        'fixed inset-y-0 left-0 z-50 h-full border-r bg-background p-6 shadow-lg transition ease-in-out data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:slide-out-to-left data-[state=open]:slide-in-from-left data-[state=closed]:duration-300 data-[state=open]:duration-500 sm:max-w-sm',
        className
      )}
      {...props}
    >
      {children}
      <SheetPrimitive.Close className="absolute right-4 top-4 rounded-sm opacity-70 ring-offset-background transition-opacity hover:opacity-100 focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:pointer-events-none data-[state=open]:bg-secondary">
        <IconClose />
        <span className="sr-only">Close</span>
      </SheetPrimitive.Close>
    </SheetPrimitive.Content>
  </SheetPortal>
))
SheetContent.displayName = SheetPrimitive.Content.displayName

const SheetHeader = ({
  className,
  ...props
}: React.HTMLAttributes<HTMLDivElement>) => (
  <div className={cn('flex flex-col space-y-2', className)} {...props} />
)
SheetHeader.displayName = 'SheetHeader'

const SheetFooter = ({
  className,
  ...props
}: React.HTMLAttributes<HTMLDivElement>) => (
  <div
    className={cn(
      'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2',
      className
    )}
    {...props}
  />
)
SheetFooter.displayName = 'SheetFooter'

const SheetTitle = React.forwardRef<
  React.ElementRef<typeof SheetPrimitive.Title>,
  React.ComponentPropsWithoutRef<typeof SheetPrimitive.Title>
>(({ className, ...props }, ref) => (
  <SheetPrimitive.Title
    ref={ref}
    className={cn('text-lg font-semibold text-foreground', className)}
    {...props}
  />
))
SheetTitle.displayName = SheetPrimitive.Title.displayName

const SheetDescription = React.forwardRef<
  React.ElementRef<typeof SheetPrimitive.Description>,
  React.ComponentPropsWithoutRef<typeof SheetPrimitive.Description>
>(({ className, ...props }, ref) => (
  <SheetPrimitive.Description
    ref={ref}
    className={cn('text-sm text-muted-foreground', className)}
    {...props}
  />
))
SheetDescription.displayName = SheetPrimitive.Description.displayName

export {
  Sheet,
  SheetTrigger,
  SheetClose,
  SheetContent,
  SheetHeader,
  SheetFooter,
  SheetTitle,
  SheetDescription
}
spaces/A666sxr/Genshin_TTS/inference_api.py
DELETED
@@ -1,66 +0,0 @@
import torch
import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import text_to_sequence
import io
from scipy.io.wavfile import write

from flask import Flask, request
import threading
app = Flask(__name__)
mutex = threading.Lock()

def get_text(text, hps):
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm
hps = utils.get_hparams_from_file("./configs/ljs_mb_istft_vits.json")
net_g = SynthesizerTrn(
    len(symbols),
    hps.data.filter_length // 2 + 1,
    hps.train.segment_size // hps.data.hop_length,
    **hps.model)
_ = net_g.eval()

# _ = utils.load_checkpoint("../tempbest.pth", net_g, None)
import time


def tts(txt):
    audio = None
    if mutex.acquire(blocking=False):
        try:
            stn_tst = get_text(txt, hps)
            with torch.no_grad():
                x_tst = stn_tst.unsqueeze(0)
                x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
                t1 = time.time()
                audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8,
                                    length_scale=1)[0][0, 0].data.float().numpy()
                t2 = time.time()
                print("推理时间:", (t2 - t1), "s")
        finally:
            mutex.release()
    return audio

@app.route('/tts')
def text_api():
    text = request.args.get('text','')
    bytes_wav = bytes()
    byte_io = io.BytesIO(bytes_wav)
    audio = tts(text)
    if audio is None:
        return "服务器忙"
    write(byte_io, 22050, audio)
    wav_bytes = byte_io.read()

    # audio_data = base64.b64encode(wav_bytes).decode('UTF-8')
    return wav_bytes, 200, {'Content-Type': 'audio/wav'}


if __name__ == '__main__':
    app.run("0.0.0.0", 8080)
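For readers skimming this diff, the deleted file above exposed a single Flask endpoint, GET /tts, which returned raw WAV bytes (or a plain-text "busy" string when the inference lock was held). The sketch below is a hypothetical client, not part of the deleted space: it assumes the server is running locally on port 8080 (matching app.run above) and that the requests package is installed. Note also that, as written, the deleted handler calls byte_io.read() without seeking back to the start of the buffer, so a byte_io.seek(0) would likely have been needed for it to return non-empty audio.

# Hypothetical client for the /tts endpoint defined in inference_api.py above.
# Assumptions: the service runs locally on port 8080 and 'requests' is installed.
import requests

def synthesize(text: str, out_path: str = "tts_output.wav") -> None:
    # The handler reads its input from the 'text' query parameter.
    resp = requests.get("http://127.0.0.1:8080/tts", params={"text": text}, timeout=60)
    resp.raise_for_status()
    if resp.headers.get("Content-Type", "").startswith("audio/wav"):
        # Success path: the response body is a complete WAV file.
        with open(out_path, "wb") as f:
            f.write(resp.content)
    else:
        # Busy path: the server returned a plain-text message instead of audio.
        print("Server busy:", resp.text)

if __name__ == "__main__":
    synthesize("Hello from a test client.")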
spaces/AI-Hobbyist/Hoyo-RVC/uvr5_pack/lib_v5/nets_123812KB.py
DELETED
@@ -1,122 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from uvr5_pack.lib_v5 import layers_123821KB as layers
-
-
-class BaseASPPNet(nn.Module):
-    def __init__(self, nin, ch, dilations=(4, 8, 16)):
-        super(BaseASPPNet, self).__init__()
-        self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
-        self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
-        self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
-        self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
-        self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
-        self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
-        self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
-        self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
-        self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
-    def __call__(self, x):
-        h, e1 = self.enc1(x)
-        h, e2 = self.enc2(h)
-        h, e3 = self.enc3(h)
-        h, e4 = self.enc4(h)
-
-        h = self.aspp(h)
-
-        h = self.dec4(h, e4)
-        h = self.dec3(h, e3)
-        h = self.dec2(h, e2)
-        h = self.dec1(h, e1)
-
-        return h
-
-
-class CascadedASPPNet(nn.Module):
-    def __init__(self, n_fft):
-        super(CascadedASPPNet, self).__init__()
-        self.stg1_low_band_net = BaseASPPNet(2, 32)
-        self.stg1_high_band_net = BaseASPPNet(2, 32)
-
-        self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
-        self.stg2_full_band_net = BaseASPPNet(16, 32)
-
-        self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
-        self.stg3_full_band_net = BaseASPPNet(32, 64)
-
-        self.out = nn.Conv2d(64, 2, 1, bias=False)
-        self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
-        self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
-
-        self.max_bin = n_fft // 2
-        self.output_bin = n_fft // 2 + 1
-
-        self.offset = 128
-
-    def forward(self, x, aggressiveness=None):
-        mix = x.detach()
-        x = x.clone()
-
-        x = x[:, :, : self.max_bin]
-
-        bandw = x.size()[2] // 2
-        aux1 = torch.cat(
-            [
-                self.stg1_low_band_net(x[:, :, :bandw]),
-                self.stg1_high_band_net(x[:, :, bandw:]),
-            ],
-            dim=2,
-        )
-
-        h = torch.cat([x, aux1], dim=1)
-        aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
-        h = torch.cat([x, aux1, aux2], dim=1)
-        h = self.stg3_full_band_net(self.stg3_bridge(h))
-
-        mask = torch.sigmoid(self.out(h))
-        mask = F.pad(
-            input=mask,
-            pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-            mode="replicate",
-        )
-
-        if self.training:
-            aux1 = torch.sigmoid(self.aux1_out(aux1))
-            aux1 = F.pad(
-                input=aux1,
-                pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
-                mode="replicate",
-            )
-            aux2 = torch.sigmoid(self.aux2_out(aux2))
-            aux2 = F.pad(
-                input=aux2,
-                pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
-                mode="replicate",
-            )
-            return mask * mix, aux1 * mix, aux2 * mix
-        else:
-            if aggressiveness:
-                mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
-                    mask[:, :, : aggressiveness["split_bin"]],
-                    1 + aggressiveness["value"] / 3,
-                )
-                mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
-                    mask[:, :, aggressiveness["split_bin"] :],
-                    1 + aggressiveness["value"],
-                )
-
-            return mask * mix
-
-    def predict(self, x_mag, aggressiveness=None):
-        h = self.forward(x_mag, aggressiveness)
-
-        if self.offset > 0:
-            h = h[:, :, :, self.offset : -self.offset]
-            assert h.size()[3] > 0
-
-        return h
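For orientation, the cascaded network above masks a 2-channel magnitude spectrogram. A minimal usage sketch follows; the `n_fft`, batch size and frame count are assumptions, and it presumes the companion `layers_123821KB` module from the same repository is importable.

```python
# Sketch of driving the deleted CascadedASPPNet (shapes and n_fft are assumed).
import torch
from nets_123812KB import CascadedASPPNet  # hypothetical import of the module above

n_fft = 2048
model = CascadedASPPNet(n_fft).eval()

# (batch, stereo channels, n_fft // 2 + 1 frequency bins, time frames)
x_mag = torch.rand(1, 2, n_fft // 2 + 1, 512)

with torch.no_grad():
    # predict() crops self.offset (128) frames from each end of the time axis,
    # so the clip must be longer than 256 frames; the aggressiveness keys match forward().
    out = model.predict(x_mag, aggressiveness={"value": 0.1, "split_bin": 256})
print(out.shape)  # roughly (1, 2, n_fft // 2 + 1, 512 - 2 * 128)
```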
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/__init__.py
DELETED
File without changes
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_melception.py
DELETED
@@ -1,241 +0,0 @@
-import random
-
-import numpy as np
-import torch
-import torchvision
-from omegaconf import OmegaConf
-from torch.utils.data.dataloader import DataLoader
-from torchvision.models.inception import BasicConv2d, Inception3
-from tqdm import tqdm
-
-from dataset import VGGSound
-from logger import LoggerWithTBoard
-from loss import WeightedCrossEntropy
-from metrics import metrics
-from transforms import Crop, StandardNormalizeAudio, ToTensor
-
-
-# TODO: refactor ./evaluation/feature_extractors/melception.py to handle this class as well.
-# So far couldn't do it because of the difference in outputs
-class Melception(Inception3):
-
-    def __init__(self, num_classes, **kwargs):
-        # inception = Melception(num_classes=309)
-        super().__init__(num_classes=num_classes, **kwargs)
-        # the same as https://github.com/pytorch/vision/blob/5339e63148/torchvision/models/inception.py#L95
-        # but for 1-channel input instead of RGB.
-        self.Conv2d_1a_3x3 = BasicConv2d(1, 32, kernel_size=3, stride=2)
-        # also the 'hight' of the mel spec is 80 (vs 299 in RGB) we remove all max pool from Inception
-        self.maxpool1 = torch.nn.Identity()
-        self.maxpool2 = torch.nn.Identity()
-
-    def forward(self, x):
-        x = x.unsqueeze(1)
-        return super().forward(x)
-
-def train_inception_scorer(cfg):
-    logger = LoggerWithTBoard(cfg)
-
-    random.seed(cfg.seed)
-    np.random.seed(cfg.seed)
-    torch.manual_seed(cfg.seed)
-    torch.cuda.manual_seed_all(cfg.seed)
-    # makes iterations faster (in this case 30%) if your inputs are of a fixed size
-    # https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
-    torch.backends.cudnn.benchmark = True
-
-    meta_path = './data/vggsound.csv'
-    train_ids_path = './data/vggsound_train.txt'
-    cache_path = './data/'
-    splits_path = cache_path
-
-    transforms = [
-        StandardNormalizeAudio(cfg.mels_path, train_ids_path, cache_path),
-    ]
-    if cfg.cropped_size not in [None, 'None', 'none']:
-        logger.print_logger.info(f'Using cropping {cfg.cropped_size}')
-        transforms.append(Crop(cfg.cropped_size))
-    transforms.append(ToTensor())
-    transforms = torchvision.transforms.transforms.Compose(transforms)
-
-    datasets = {
-        'train': VGGSound('train', cfg.mels_path, transforms, splits_path, meta_path),
-        'valid': VGGSound('valid', cfg.mels_path, transforms, splits_path, meta_path),
-        'test': VGGSound('test', cfg.mels_path, transforms, splits_path, meta_path),
-    }
-
-    loaders = {
-        'train': DataLoader(datasets['train'], batch_size=cfg.batch_size, shuffle=True, drop_last=True,
-                            num_workers=cfg.num_workers, pin_memory=True),
-        'valid': DataLoader(datasets['valid'], batch_size=cfg.batch_size,
-                            num_workers=cfg.num_workers, pin_memory=True),
-        'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
-                           num_workers=cfg.num_workers, pin_memory=True),
-    }
-
-    device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
-
-    model = Melception(num_classes=len(datasets['train'].target2label))
-    model = model.to(device)
-    param_num = logger.log_param_num(model)
-
-    if cfg.optimizer == 'adam':
-        optimizer = torch.optim.Adam(
-            model.parameters(), lr=cfg.learning_rate, betas=cfg.betas, weight_decay=cfg.weight_decay)
-    elif cfg.optimizer == 'sgd':
-        optimizer = torch.optim.SGD(
-            model.parameters(), lr=cfg.learning_rate, momentum=cfg.momentum, weight_decay=cfg.weight_decay)
-    else:
-        raise NotImplementedError
-
-    if cfg.cls_weights_in_loss:
-        weights = 1 / datasets['train'].class_counts
-    else:
-        weights = torch.ones(len(datasets['train'].target2label))
-    criterion = WeightedCrossEntropy(weights.to(device))
-
-    # loop over the train and validation multiple times (typical PT boilerplate)
-    no_change_epochs = 0
-    best_valid_loss = float('inf')
-    early_stop_triggered = False
-
-    for epoch in range(cfg.num_epochs):
-
-        for phase in ['train', 'valid']:
-            if phase == 'train':
-                model.train()
-            else:
-                model.eval()
-
-            running_loss = 0
-            preds_from_each_batch = []
-            targets_from_each_batch = []
-
-            prog_bar = tqdm(loaders[phase], f'{phase} ({epoch})', ncols=0)
-            for i, batch in enumerate(prog_bar):
-                inputs = batch['input'].to(device)
-                targets = batch['target'].to(device)
-
-                # zero the parameter gradients
-                optimizer.zero_grad()
-
-                # forward + backward + optimize
-                with torch.set_grad_enabled(phase == 'train'):
-                    # inception v3
-                    if phase == 'train':
-                        outputs, aux_outputs = model(inputs)
-                        loss1 = criterion(outputs, targets)
-                        loss2 = criterion(aux_outputs, targets)
-                        loss = loss1 + 0.4*loss2
-                        loss = criterion(outputs, targets, to_weight=True)
-                    else:
-                        outputs = model(inputs)
-                        loss = criterion(outputs, targets, to_weight=False)
-
-                if phase == 'train':
-                    loss.backward()
-                    optimizer.step()
-
-                # loss
-                running_loss += loss.item()
-
-                # for metrics calculation later on
-                preds_from_each_batch += [outputs.detach().cpu()]
-                targets_from_each_batch += [targets.cpu()]
-
-                # iter logging
-                if i % 50 == 0:
-                    logger.log_iter_loss(loss.item(), epoch*len(loaders[phase])+i, phase)
-                    # tracks loss in the tqdm progress bar
-                    prog_bar.set_postfix(loss=loss.item())
-
-            # logging loss
-            epoch_loss = running_loss / len(loaders[phase])
-            logger.log_epoch_loss(epoch_loss, epoch, phase)
-
-            # logging metrics
-            preds_from_each_batch = torch.cat(preds_from_each_batch)
-            targets_from_each_batch = torch.cat(targets_from_each_batch)
-            metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
-            logger.log_epoch_metrics(metrics_dict, epoch, phase)
-
-            # Early stopping
-            if phase == 'valid':
-                if epoch_loss < best_valid_loss:
-                    no_change_epochs = 0
-                    best_valid_loss = epoch_loss
-                    logger.log_best_model(model, epoch_loss, epoch, optimizer, metrics_dict)
-                else:
-                    no_change_epochs += 1
-                    logger.print_logger.info(
-                        f'Valid loss hasnt changed for {no_change_epochs} patience: {cfg.patience}'
-                    )
-                    if no_change_epochs >= cfg.patience:
-                        early_stop_triggered = True
-
-        if early_stop_triggered:
-            logger.print_logger.info(f'Training is early stopped @ {epoch}')
-            break
-
-    logger.print_logger.info('Finished Training')
-
-    # loading the best model
-    ckpt = torch.load(logger.best_model_path)
-    model.load_state_dict(ckpt['model'])
-    logger.print_logger.info(f'Loading the best model from {logger.best_model_path}')
-    logger.print_logger.info((f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}'))
-
-    # Testing the model
-    model.eval()
-    running_loss = 0
-    preds_from_each_batch = []
-    targets_from_each_batch = []
-
-    for i, batch in enumerate(loaders['test']):
-        inputs = batch['input'].to(device)
-        targets = batch['target'].to(device)
-
-        # zero the parameter gradients
-        optimizer.zero_grad()
-
-        # forward + backward + optimize
-        with torch.set_grad_enabled(False):
-            outputs = model(inputs)
-            loss = criterion(outputs, targets, to_weight=False)
-
-        # loss
-        running_loss += loss.item()
-
-        # for metrics calculation later on
-        preds_from_each_batch += [outputs.detach().cpu()]
-        targets_from_each_batch += [targets.cpu()]
-
-    # logging metrics
-    preds_from_each_batch = torch.cat(preds_from_each_batch)
-    targets_from_each_batch = torch.cat(targets_from_each_batch)
-    test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
-    test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
-    test_metrics_dict['param_num'] = param_num
-    # TODO: I have no idea why tboard doesn't keep metrics (hparams) when
-    # I run this experiment from cli: `python train_melception.py config=./configs/vggish.yaml`
-    # while when I run it in vscode debugger the metrics are logger (wtf)
-    logger.log_test_metrics(test_metrics_dict, dict(cfg), ckpt['epoch'])
-
-    logger.print_logger.info('Finished the experiment')
-
-
-if __name__ == '__main__':
-    # input = torch.rand(16, 1, 80, 848)
-    # output, aux = inception(input)
-    # print(output.shape, aux.shape)
-    # Expected input size: (3, 299, 299) in RGB -> (1, 80, 848) in Mel Spec
-    # train_inception_scorer()
-
-    cfg_cli = OmegaConf.from_cli()
-    cfg_yml = OmegaConf.load(cfg_cli.config)
-    # the latter arguments are prioritized
-    cfg = OmegaConf.merge(cfg_yml, cfg_cli)
-    OmegaConf.set_readonly(cfg, True)
-    print(OmegaConf.to_yaml(cfg))
-
-    train_inception_scorer(cfg)
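One detail worth noting in the `__main__` block above: command-line overrides win over the YAML file because they are passed last to `OmegaConf.merge`. A small self-contained sketch of that precedence, with made-up keys standing in for the real config:

```python
# Sketch of the OmegaConf merge precedence used above (keys and values are made up).
from omegaconf import OmegaConf

cfg_yml = OmegaConf.create({"device": "cuda:0", "batch_size": 32, "num_epochs": 100})
cfg_cli = OmegaConf.create({"batch_size": 8})   # stands in for OmegaConf.from_cli()
cfg = OmegaConf.merge(cfg_yml, cfg_cli)         # the later argument is prioritized
assert cfg.batch_size == 8 and cfg.device == "cuda:0"
print(OmegaConf.to_yaml(cfg))
```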
spaces/AIZeroToHero/03-ImageSearchSimilar/app.py
DELETED
@@ -1,185 +0,0 @@
-from html import escape
-import re
-import streamlit as st
-import pandas as pd, numpy as np
-from transformers import CLIPProcessor, CLIPModel
-from st_clickable_images import clickable_images
-
-@st.cache(
-    show_spinner=False,
-    hash_funcs={
-        CLIPModel: lambda _: None,
-        CLIPProcessor: lambda _: None,
-        dict: lambda _: None,
-    },
-)
-def load():
-    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
-    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
-    df = {0: pd.read_csv("data.csv"), 1: pd.read_csv("data2.csv")}
-    embeddings = {0: np.load("embeddings.npy"), 1: np.load("embeddings2.npy")}
-    for k in [0, 1]:
-        embeddings[k] = embeddings[k] / np.linalg.norm(
-            embeddings[k], axis=1, keepdims=True
-        )
-    return model, processor, df, embeddings
-
-
-model, processor, df, embeddings = load()
-source = {0: "\nSource: Unsplash", 1: "\nSource: The Movie Database (TMDB)"}
-
-
-def compute_text_embeddings(list_of_strings):
-    inputs = processor(text=list_of_strings, return_tensors="pt", padding=True)
-    result = model.get_text_features(**inputs).detach().numpy()
-    return result / np.linalg.norm(result, axis=1, keepdims=True)
-
-
-def image_search(query, corpus, n_results=24):
-    positive_embeddings = None
-
-    def concatenate_embeddings(e1, e2):
-        if e1 is None:
-            return e2
-        else:
-            return np.concatenate((e1, e2), axis=0)
-
-    splitted_query = query.split("EXCLUDING ")
-    dot_product = 0
-    k = 0 if corpus == "Unsplash" else 1
-    if len(splitted_query[0]) > 0:
-        positive_queries = splitted_query[0].split(";")
-        for positive_query in positive_queries:
-            match = re.match(r"\[(Movies|Unsplash):(\d{1,5})\](.*)", positive_query)
-            if match:
-                corpus2, idx, remainder = match.groups()
-                idx, remainder = int(idx), remainder.strip()
-                k2 = 0 if corpus2 == "Unsplash" else 1
-                positive_embeddings = concatenate_embeddings(
-                    positive_embeddings, embeddings[k2][idx : idx + 1, :]
-                )
-                if len(remainder) > 0:
-                    positive_embeddings = concatenate_embeddings(
-                        positive_embeddings, compute_text_embeddings([remainder])
-                    )
-            else:
-                positive_embeddings = concatenate_embeddings(
-                    positive_embeddings, compute_text_embeddings([positive_query])
-                )
-        dot_product = embeddings[k] @ positive_embeddings.T
-        dot_product = dot_product - np.median(dot_product, axis=0)
-        dot_product = dot_product / np.max(dot_product, axis=0, keepdims=True)
-        dot_product = np.min(dot_product, axis=1)
-
-    if len(splitted_query) > 1:
-        negative_queries = (" ".join(splitted_query[1:])).split(";")
-        negative_embeddings = compute_text_embeddings(negative_queries)
-        dot_product2 = embeddings[k] @ negative_embeddings.T
-        dot_product2 = dot_product2 - np.median(dot_product2, axis=0)
-        dot_product2 = dot_product2 / np.max(dot_product2, axis=0, keepdims=True)
-        dot_product -= np.max(np.maximum(dot_product2, 0), axis=1)
-
-    results = np.argsort(dot_product)[-1 : -n_results - 1 : -1]
-    return [
-        (
-            df[k].iloc[i]["path"],
-            df[k].iloc[i]["tooltip"] + source[k],
-            i,
-        )
-        for i in results
-    ]
-
-
-description = """
-# Semantic image search
-**Enter your query and hit enter**
-"""
-
-howto = """
-- Click image to find similar images
-- Use "**;**" to combine multiple queries)
-- Use "**EXCLUDING**", to exclude a query
-"""
-
-
-def main():
-    st.markdown(
-        """
-        <style>
-        .block-container{
-            max-width: 1200px;
-        }
-        div.row-widget.stRadio > div{
-            flex-direction:row;
-            display: flex;
-            justify-content: center;
-        }
-        div.row-widget.stRadio > div > label{
-            margin-left: 5px;
-            margin-right: 5px;
-        }
-        section.main>div:first-child {
-            padding-top: 0px;
-        }
-        section:not(.main)>div:first-child {
-            padding-top: 30px;
-        }
-        div.reportview-container > section:first-child{
-            max-width: 320px;
-        }
-        #MainMenu {
-            visibility: hidden;
-        }
-        footer {
-            visibility: hidden;
-        }
-        </style>""",
-        unsafe_allow_html=True,
-    )
-    st.sidebar.markdown(description)
-    with st.sidebar.expander("Advanced use"):
-        st.markdown(howto)
-
-
-    st.sidebar.markdown(f"Unsplash has categories that match: backgrounds, photos, nature, iphone, etc")
-    st.sidebar.markdown(f"Unsplash images contain animals, apps, events, feelings, food, travel, nature, people, religion, sports, things, stock")
-    st.sidebar.markdown(f"Unsplash things include flag, tree, clock, money, tattoo, arrow, book, car, fireworks, ghost, health, kiss, dance, balloon, crown, eye, house, music, airplane, lighthouse, typewriter, toys")
-    st.sidebar.markdown(f"unsplash feelings include funny, heart, love, cool, congratulations, love, scary, cute, friendship, inspirational, hug, sad, cursed, beautiful, crazy, respect, transformation, peaceful, happy")
-    st.sidebar.markdown(f"unsplash people contain baby, life, women, family, girls, pregnancy, society, old people, musician, attractive, bohemian")
-    st.sidebar.markdown(f"imagenet queries include: photo of, photo of many, sculpture of, rendering of, graffiti of, tattoo of, embroidered, drawing of, plastic, black and white, painting, video game, doodle, origami, sketch, etc")
-
-
-    _, c, _ = st.columns((1, 3, 1))
-    if "query" in st.session_state:
-        query = c.text_input("", value=st.session_state["query"])
-    else:
-
-        query = c.text_input("", value="lighthouse")
-    corpus = st.radio("", ["Unsplash"])
-    #corpus = st.radio("", ["Unsplash", "Movies"])
-    if len(query) > 0:
-        results = image_search(query, corpus)
-        clicked = clickable_images(
-            [result[0] for result in results],
-            titles=[result[1] for result in results],
-            div_style={
-                "display": "flex",
-                "justify-content": "center",
-                "flex-wrap": "wrap",
-            },
-            img_style={"margin": "2px", "height": "200px"},
-        )
-        if clicked >= 0:
-            change_query = False
-            if "last_clicked" not in st.session_state:
-                change_query = True
-            else:
-                if clicked != st.session_state["last_clicked"]:
-                    change_query = True
-            if change_query:
-                st.session_state["query"] = f"[{corpus}:{results[clicked][2]}]"
-                st.experimental_rerun()
-
-
-if __name__ == "__main__":
-    main()
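The ranking inside `image_search` above is a plain cosine-similarity argsort over pre-normalized CLIP embeddings. The sketch below reproduces just that math with random vectors standing in for the real image and text embeddings; the corpus size and 768 dimension are assumptions.

```python
# Ranking sketch for the deleted image_search logic (random stand-in embeddings).
import numpy as np

rng = np.random.default_rng(0)
image_emb = rng.normal(size=(1000, 768))
image_emb /= np.linalg.norm(image_emb, axis=1, keepdims=True)
text_emb = rng.normal(size=(1, 768))
text_emb /= np.linalg.norm(text_emb, axis=1, keepdims=True)

scores = (image_emb @ text_emb.T).ravel()   # cosine similarity, embeddings are unit norm
top24 = np.argsort(scores)[-1:-25:-1]       # same "best n_results, highest first" slice
print(top24[:5])
```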
spaces/Ababababababbababa/Arabic_poem_classifier/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Arabic_poem_classifier
-emoji: 👁
-colorFrom: yellow
-colorTo: blue
-sdk: gradio
-sdk_version: 3.0.9
-app_file: app.py
-pinned: true
-duplicated_from: Yah216/Arabic_poem_classifier
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/conversation/[id]/share/+server.ts
DELETED
@@ -1,58 +0,0 @@
-import { base } from "$app/paths";
-import { PUBLIC_ORIGIN, PUBLIC_SHARE_PREFIX } from "$env/static/public";
-import { authCondition } from "$lib/server/auth";
-import { collections } from "$lib/server/database";
-import type { SharedConversation } from "$lib/types/SharedConversation";
-import { hashConv } from "$lib/utils/hashConv.js";
-import { error } from "@sveltejs/kit";
-import { nanoid } from "nanoid";
-
-export async function POST({ params, url, locals }) {
-  /*const conversation = await collections.conversations.findOne({
-    _id: new ObjectId(params.id),
-    ...authCondition(locals),
-  });
-
-  const hash = await hashConv(conversation);
-
-  const existingShare = await collections.sharedConversations.findOne({ hash });
-
-  if (existingShare) {
-    return new Response(
-      JSON.stringify({
-        url: getShareUrl(url, existingShare._id),
-      }),
-      { headers: { "Content-Type": "application/json" } }
-    );
-  }
-
-  const shared: SharedConversation = {
-    _id: nanoid(7),
-    createdAt: new Date(),
-    messages: conversation.messages,
-    hash,
-    updatedAt: new Date(),
-    title: conversation.title,
-    model: conversation.model,
-  };
-
-  await collections.sharedConversations.insertOne(shared);
-
-  return new Response(
-    JSON.stringify({
-      url: getShareUrl(url, shared._id),
-    }),
-    { headers: { "Content-Type": "application/json" } }
-  );*/
-
-  return new Response(
-    JSON.stringify({
-      url: "",
-    }),
-    { headers: { "Content-Type": "application/json" } }
-  );
-}
-
-function getShareUrl(url: URL, shareId: string): string {
-  return `${PUBLIC_SHARE_PREFIX || `${PUBLIC_ORIGIN || url.origin}${base}`}/r/${shareId}`;
-}
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptX.py
DELETED
@@ -1,97 +0,0 @@
-from __future__ import annotations
-
-import re
-import json
-
-from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-
-
-class ChatgptX(AsyncGeneratorProvider):
-    url = "https://chatgptx.de"
-    supports_gpt_35_turbo = True
-    working = True
-
-    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        **kwargs
-    ) -> AsyncResult:
-        headers = {
-            'accept-language': 'de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US',
-            'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': 'Linux',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
-        }
-        async with ClientSession(headers=headers) as session:
-            async with session.get(f"{cls.url}/") as response:
-                response = await response.text()
-                result = re.search(r'<meta name="csrf-token" content="(.*?)"', response)
-                if result:
-                    csrf_token = result.group(1)
-                result = re.search(r"openconversions\('(.*?)'\)", response)
-                if result:
-                    chat_id = result.group(1)
-                result = re.search(r'<input type="hidden" id="user_id" value="(.*?)"', response)
-                if result:
-                    user_id = result.group(1)
-
-            if not csrf_token or not chat_id or not user_id:
-                raise RuntimeError("Missing csrf_token, chat_id or user_id")
-
-            data = {
-                '_token': csrf_token,
-                'user_id': user_id,
-                'chats_id': chat_id,
-                'prompt': format_prompt(messages),
-                'current_model': "gpt3"
-            }
-            headers = {
-                'authority': 'chatgptx.de',
-                'accept': 'application/json, text/javascript, */*; q=0.01',
-                'origin': cls.url,
-                'referer': f'{cls.url}/',
-                'x-csrf-token': csrf_token,
-                'x-requested-with': 'XMLHttpRequest'
-            }
-            async with session.post(cls.url + '/sendchat', data=data, headers=headers) as response:
-                response.raise_for_status()
-                chat = await response.json()
-                if "response" not in chat or not chat["response"]:
-                    raise RuntimeError(f'Response: {chat}')
-            headers = {
-                'authority': 'chatgptx.de',
-                'accept': 'text/event-stream',
-                'referer': f'{cls.url}/',
-                'x-csrf-token': csrf_token,
-                'x-requested-with': 'XMLHttpRequest'
-            }
-            data = {
-                "user_id": user_id,
-                "chats_id": chat_id,
-                "prompt": format_prompt(messages),
-                "current_model": "gpt3",
-                "conversions_id": chat["conversions_id"],
-                "ass_conversions_id": chat["ass_conversions_id"],
-            }
-            async with session.get(f'{cls.url}/chats_stream', params=data, headers=headers) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line.startswith(b"data: "):
-                        row = line[6:-1]
-                        if row == b"[DONE]":
-                            break
-                        try:
-                            content = json.loads(row)["choices"][0]["delta"].get("content")
-                        except:
-                            raise RuntimeError(f"Broken line: {line.decode()}")
-                        if content:
-                            yield content
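Since `create_async_generator` above is an async generator classmethod, a caller would consume it roughly like the sketch below; the import path and message payload are assumptions, and the upstream site may no longer respond.

```python
# Hypothetical consumption sketch for the deleted ChatgptX provider.
import asyncio
from g4f.Provider.Providers.ChatgptX import ChatgptX  # assumed import path

async def demo():
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in ChatgptX.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)  # streamed deltas yielded by the provider

asyncio.run(demo())
```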
spaces/AdVisual/MaskCut/connectionManager.py
DELETED
@@ -1,60 +0,0 @@
-from fastapi import WebSocket
-
-from datetime import datetime
-from typing import List
-
-class Connection:
-    websocket: WebSocket
-    connection_time: datetime
-
-    def __init__(self, websocket: WebSocket, connection_time: datetime):
-        self.websocket = websocket
-        self.connection_time = connection_time
-
-class ConnectionManager:
-    timeout = 60 * 5 # 5 minutes
-
-    def __init__(self):
-        self.active_connections: List[Connection] = []
-
-    async def connect(self, websocket: WebSocket):
-        print('Connecting')
-        await websocket.accept()
-        # Add connection time and websocket to active connections
-        self.active_connections.append(Connection(websocket=websocket, connection_time=datetime.now()))
-
-    def isConnected(self, websocket: WebSocket):
-        for connection in self.active_connections:
-            if connection.websocket == websocket:
-                return True
-        return False
-
-    def shouldDisconnect(self, websocket: WebSocket):
-        for connection in self.active_connections:
-            if connection.websocket == websocket:
-                if (datetime.now() - connection.connection_time).total_seconds() > self.timeout:
-                    print('Disconnecting...')
-                    return True
-        return False
-
-    async def receive_json(self, websocket: WebSocket):
-        if not self.isConnected(websocket):
-            return None
-        print('Receiving...')
-        data = await websocket.receive_json()
-        print('Received')
-        return data
-
-    def disconnect(self, websocket: WebSocket):
-        print('Disconnecting...')
-        for connection in self.active_connections:
-            if connection.websocket == websocket:
-                self.active_connections.remove(connection)
-                return True
-        return False
-
-    async def send_json(self, json, websocket: WebSocket):
-        print('Sending JSON...')
-        # Only send the message if the connection is still active
-        if self.isConnected(websocket):
-            await websocket.send_json(json)
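The manager above is plain FastAPI plumbing; a minimal sketch of wiring it into a websocket route follows. The endpoint path, echo payload and module name are assumptions.

```python
# Sketch of using the deleted ConnectionManager in a FastAPI app (route and payload assumed).
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from connectionManager import ConnectionManager  # assumed module name, matching the file above

app = FastAPI()
manager = ConnectionManager()

@app.websocket("/ws")
async def ws_endpoint(websocket: WebSocket):
    await manager.connect(websocket)
    try:
        # shouldDisconnect() enforces the 5-minute timeout tracked per connection
        while not manager.shouldDisconnect(websocket):
            data = await manager.receive_json(websocket)
            await manager.send_json({"echo": data}, websocket)
    except WebSocketDisconnect:
        pass
    finally:
        manager.disconnect(websocket)
```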
spaces/Adapter/T2I-Adapter/ldm/data/__init__.py
DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ClickOutsideMethods.js
DELETED
@@ -1,65 +0,0 @@
-import ClickOutside from '../clickoutside/ClickOutside.js';
-
-export default {
-    onClickOutside(gameObject, callback, scope, config) {
-        if (!gameObject) {
-            return this;
-        }
-
-        if (typeof (gameObject) === 'function') {
-            config = scope;
-            scope = callback;
-            callback = gameObject;
-            gameObject = this;
-        }
-
-        if (gameObject._clickOutside === undefined) {
-            gameObject._clickOutside = new ClickOutside(gameObject, config);
-        }
-        gameObject._clickOutside.on('clickoutside', callback, scope);
-
-        return this;
-    },
-
-    offClickOutside(gameObject, callback, scope) {
-        if (typeof (gameObject) === 'function') {
-            scope = callback;
-            callback = gameObject;
-            gameObject = this;
-        }
-
-        if (gameObject._clickOutside === undefined) {
-            return this;
-        }
-        gameObject._clickOutside.off('clickoutside', callback, scope);
-
-        return this;
-    },
-
-    enableClickOutside(gameObject, enabled) {
-        if (gameObject && typeof (gameObject) !== 'object') {
-            enabled = gameObject;
-            gameObject = this;
-        }
-
-        if (gameObject._clickOutside === undefined) {
-            return this;
-        }
-        gameObject._clickOutside.setEnable(enabled);
-
-        return this;
-    },
-
-    disableClickOutside(gameObject) {
-        if (gameObject && typeof (gameObject) !== 'object') {
-            gameObject = this;
-        }
-
-        if (gameObject._clickOutside === undefined) {
-            return this;
-        }
-        gameObject._clickOutside.setEnable(false);
-
-        return this;
-    }
-}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollablePanel.js
DELETED
@@ -1,38 +0,0 @@
-import MergeStyle from './utils/MergeStyle.js';
-import ScrollablePanel from '../../scrollablepanel/ScrollablePanel.js';
-import CreateChild from './utils/CreateChild.js';
-import ReplaceSliderConfig from './utils/ReplaceSliderConfig.js';
-
-var CreateScrollablePanel = function (scene, data, view, styles, customBuilders) {
-    data = MergeStyle(data, styles);
-
-    // Replace data by child game object
-    CreateChild(scene, data, 'background', view, styles, customBuilders);
-
-    var panelConfig = data.panel;
-    if (panelConfig) {
-        CreateChild(scene, panelConfig, 'child', view, styles, customBuilders);
-    }
-
-    var sliderConfig = data.slider;
-    if (sliderConfig) {
-        ReplaceSliderConfig(scene, data.slider, view, styles, customBuilders);
-
-        var sliderButtonsConfig = sliderConfig.buttons;
-        if (sliderButtonsConfig) {
-            CreateChild(scene, sliderButtonsConfig, 'top', view, styles, customBuilders);
-            CreateChild(scene, sliderButtonsConfig, 'bottom', view, styles, customBuilders);
-            CreateChild(scene, sliderButtonsConfig, 'left', view, styles, customBuilders);
-            CreateChild(scene, sliderButtonsConfig, 'right', view, styles, customBuilders);
-        }
-    }
-
-    CreateChild(scene, data, 'header', styles, customBuilders);
-    CreateChild(scene, data, 'footer', styles, customBuilders);
-
-    var gameObject = new ScrollablePanel(scene, data);
-    scene.add.existing(gameObject);
-    return gameObject;
-};
-
-export default CreateScrollablePanel;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ResolveHeight.js
DELETED
@@ -1,23 +0,0 @@
-import ResolveHeightBase from '../basesizer/ResolveHeight.js';
-
-var ResolveHeight = function (height) {
-    var height = ResolveHeightBase.call(this, height);
-
-    // Get proportionLength
-    if ((this.proportionLength === undefined) && (this.orientation === 1)) {
-        var remainder = height - this.childrenHeight;
-        if (remainder > 0) {
-            remainder = height - this.getChildrenHeight(false);
-            this.proportionLength = remainder / this.childrenProportion;
-        } else {
-            this.proportionLength = 0;
-            if (remainder < 0) {
-                // Warning
-            }
-        }
-    }
-
-    return height;
-}
-
-export default ResolveHeight;
spaces/AiiluoChen/webui/app.py
DELETED
@@ -1,72 +0,0 @@
-import os
-from subprocess import getoutput
-
-gpu_info = getoutput('nvidia-smi')
-if("A10G" in gpu_info):
-    os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl")
-elif("T4" in gpu_info):
-    os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl")
-
-os.system(f"git clone -b v1.5 https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui")
-os.chdir("/home/user/app/stable-diffusion-webui")
-
-os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py")
-os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''')
-os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py")
-os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
-os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py")
-
-# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header----------------------------
-os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py")
-os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
-# ---------------------------------------------------------------------------------------------------------------------------------------------------
-
-if "IS_SHARED_UI" in os.environ:
-    os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/")
-
-    os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json")
-    os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json")
-
-    os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}")
-    os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}")
-    os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}")
-
-    os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding")
-else:
-    # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py")
-    os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py")
-
-    # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME")
-    #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study")
-    os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser")
-    os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui")
-
-    # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt")
-    #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt")
-    #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt")
-    #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt")
-    #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt")
-    #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt")
-    #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt")
-    #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt")
-    #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt")
-    #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt")
-    #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt")
-
-    #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt")
-    #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt")
-
-    #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt")
-    #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml")
-
-    os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.ckpt")
-    os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.yaml")
-
-    os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test")
-
spaces/Ameaou/academic-chatgpt3.1/docs/README_FR.md
DELETED
@@ -1,296 +0,0 @@
|
|
1 |
-
> **Note**
|
2 |
-
>
|
3 |
-
> Ce fichier README est généré automatiquement par le plugin de traduction markdown de ce projet et n'est peut - être pas correct à 100%.
|
4 |
-
>
|
5 |
-
|
6 |
-
# <img src="logo.png" width="40" > ChatGPT Optimisation Académique
|
7 |
-
|
8 |
-
**Si vous aimez ce projet, donnez-lui une étoile; si vous avez inventé des raccourcis académiques plus utiles ou des plugins fonctionnels, n'hésitez pas à ouvrir une demande ou une demande de traction. Nous avons également un fichier README en [anglais|](docs/README_EN.md)[japonais|](docs/README_JP.md)[russe|](docs/README_RS.md)[français](docs/README_FR.md) traduit par ce projet lui-même.**
|
9 |
-
|
10 |
-
> **Note**
|
11 |
-
>
|
12 |
-
> 1. Veuillez noter que seuls les plugins de fonction signalés en **rouge** sont capables de lire les fichiers, certains plugins se trouvent dans le **menu déroulant** de la section plugin. Nous sommes également les bienvenus avec la plus haute priorité pour traiter et accepter tout nouveau PR de plugin!
|
13 |
-
>
|
14 |
-
> 2. Chaque fichier dans ce projet est expliqué en détail dans l'auto-analyse [self_analysis.md](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). Avec l'itération des versions, vous pouvez également cliquer sur les plugins fonctionnels pertinents pour appeler GPT et générer un rapport d'auto-analyse projet mis à jour. Les questions fréquemment posées sont résumées dans le [wiki](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98).
|
15 |
-
>
|
16 |
-
|
17 |
-
<div align="center">
|
18 |
-
|
19 |
-
Fonctionnalité | Description
|
20 |
-
--- | ---
|
21 |
-
Polissage en un clic | Prend en charge la correction en un clic et la recherche d'erreurs de syntaxe dans les documents de recherche.
|
22 |
-
Traduction Chinois-Anglais en un clic | Une touche pour traduire la partie chinoise en anglais ou celle anglaise en chinois.
|
23 |
-
Explication de code en un clic | Affiche et explique correctement le code.
|
24 |
-
[Raccourcis clavier personnalisables](https://www.bilibili.com/video/BV14s4y1E7jN) | Prend en charge les raccourcis clavier personnalisables.
|
25 |
-
[Configuration du serveur proxy](https://www.bilibili.com/video/BV1rc411W7Dr) | Prend en charge la configuration du serveur proxy.
|
26 |
-
Conception modulaire | Prend en charge la personnalisation des plugins de fonctions et des [plugins] de fonctions hiérarchiques personnalisés, et les plugins prennent en charge [la mise à jour à chaud](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97).
|
27 |
-
[Auto-analyse du programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] [Lire en un clic](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) le code source de ce projet.
|
28 |
-
[Analyse de programme](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugins] En un clic, les projets Python/C/C++/Java/Lua/... peuvent être analysés.
|
29 |
-
Lire le document de recherche | [Plugins] Lisez le résumé de l'article en latex et générer un résumé.
|
30 |
-
Traduction et polissage de l'article complet en LaTeX | [Plugins] Une touche pour traduire ou corriger en LaTeX
|
31 |
-
Génération Commentaire de fonction en vrac | [Plugins] Lisez en un clic les fonctions et générez des commentaires de fonction.
|
32 |
-
Rapport d'analyse automatique des chats générés | [Plugins] Génère un rapport de synthèse après l'exécution.
|
33 |
-
[Assistant arxiv](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugins] Entrez l'url de l'article arxiv pour traduire le résumé + télécharger le PDF en un clic
|
34 |
-
[Traduction complète des articles PDF](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugins] Extraire le titre et le résumé de l'article PDF + Traduire le texte entier (multithread)
|
35 |
-
[Aide à la recherche Google Academ](https://www.bilibili.com/video/BV19L411U7ia) | [Plugins] Donnez à GPT l'URL de n'importe quelle page de recherche Google Academ pour vous aider à sélectionner des articles intéressants
|
36 |
-
Affichage de formules/images/tableaux | Afficher la forme traduite et rendue d'une formule en même temps, plusieurs formules et surlignage du code prend en charge
|
37 |
-
Prise en charge des plugins multithread | Prise en charge de l'appel multithread de chatgpt, traitement en masse de texte ou de programmes en un clic
|
38 |
-
Activer le thème Gradio sombre [theme](https://github.com/binary-husky/chatgpt_academic/issues/173) au démarrage | Ajoutez ```/?__dark-theme=true``` à l'URL du navigateur pour basculer vers le thème sombre
|
39 |
-
[Prise en charge de plusieurs modèles LLM](https://www.bilibili.com/video/BV1wT411p7yf), [prise en charge de l'interface API2D](https://api2d.com/) | Comment cela serait-il de se faire servir par GPT3.5, GPT4 et la [ChatGLM de Tsinghua](https://github.com/THUDM/ChatGLM-6B) en même temps?
|
40 |
-
Expérience en ligne d'huggingface sans science | Après vous être connecté à huggingface, copiez [cet espace](https://huggingface.co/spaces/qingxu98/gpt-academic)
|
41 |
-
... | ...
|
42 |
-
|
43 |
-
</div>
|
44 |
-
|
45 |
-
|
46 |
-
Vous êtes un traducteur professionnel d'articles universitaires en français.
|
47 |
-
|
48 |
-
Ceci est un fichier Markdown, veuillez le traduire en français sans modifier les commandes Markdown existantes :
|
49 |
-
|
50 |
-
- Nouvelle interface (modifiable en modifiant l'option de mise en page dans config.py pour basculer entre les mises en page gauche-droite et haut-bas)
|
51 |
-
<div align="center">
|
52 |
-
<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
|
53 |
-
</div>
|
54 |
-
|
55 |
-
|
56 |
-
- All buttons are generated dynamically by reading functional.py, so users can freely add custom functions and free up the clipboard.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231975334-b4788e91-4887-412f-8b43-2b9c5f41d248.gif" width="700" >
</div>

- Proofreading/polishing

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/231980294-f374bdcb-3309-4560-b424-38ef39f04ebd.gif" width="700" >
</div>

- If the output contains formulas, they are shown both as plain text and in rendered form, making them easy to copy and read.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png" width="700" >
</div>

- Don't feel like reading the project's code? Let ChatGPT demo the whole project for you.

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="700" >
</div>

- Combined use of several large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/232537274-deca0563-7aa6-4b5d-94a2-b7c453c47794.png" width="700" >
</div>

Several large language models can also be combined in the [huggingface](https://huggingface.co/spaces/qingxu98/academic-chatgpt-beta) test version (the huggingface version does not support ChatGLM).


---

## Installation - Method 1: Run directly (Windows, Linux or MacOS)

1. Download the project
```sh
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
```

2. Configure the API_KEY and proxy settings

In `config.py`, configure the proxy settings and your OpenAI API key, as described below
```
1. If you are in China, you need an overseas proxy to use the OpenAI API smoothly. Please read config.py carefully (1. modify the USE_PROXY option; 2. modify the proxies settings as instructed there).
2. Configure your OpenAI API key. You need to register on the OpenAI website to obtain an API key; once you have it, set it in config.py.
3. All issues related to proxy networks (timeouts, proxies not working) are summarized in https://github.com/binary-husky/chatgpt_academic/issues/1.
```
(Note: on startup the program first checks whether a private configuration file named `config_private.py` exists and, if so, uses its values to override those in `config.py`. If you understand this loading logic, we strongly recommend creating a `config_private.py` next to `config.py` and copying (transferring) your settings into it. `config_private.py` is not tracked by git, which keeps your personal information safer.)

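To make this concrete, here is a minimal sketch of what such a `config_private.py` could contain. It only assumes the options named above (`API_KEY`, `USE_PROXY`, `proxies`); the key value and the proxy address are placeholders, and the comments in `config.py` remain the authoritative list of available settings.
```python
# config_private.py - minimal sketch, not tracked by git.
# Copy only the settings you want to override from config.py.
API_KEY = "sk-..."  # placeholder: your own OpenAI API key

USE_PROXY = True  # set to False if you can reach the OpenAI API directly
proxies = {
    # placeholder address: point this at your own proxy software
    "http": "socks5h://localhost:11284",
    "https": "socks5h://localhost:11284",
}
```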
3. Install the dependencies
```sh
# (Option 1) Recommended
python -m pip install -r requirements.txt

# (Option 2) If you use anaconda, the steps are similar:
# (Option 2.1) conda create -n gptac_venv python=3.11
# (Option 2.2) conda activate gptac_venv
# (Option 2.3) python -m pip install -r requirements.txt

# note: use the official pip source or the Alibaba pip source; other sources (such as some university mirrors) may cause problems. To use another source temporarily:
# python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
```

If you need support for Tsinghua's ChatGLM, additional dependencies must be installed (if you are not familiar with Python or your machine is not powerful enough, we recommend skipping this):
```sh
python -m pip install -r request_llm/requirements_chatglm.txt
```

4. Run
```sh
python main.py
```

5. Test the function plugins
```
- Python project analysis test
    In the input area, enter `./crazy_functions/test_project/python/dqn`, then click "Parse Entire Python Project"
- Self-reading test
    Click "[Multi-thread demo] Parse this project itself (source self-translation)"
- Experimental function template test (asks the AI what happened in history on this day); you can use this function as a template for more complex features.
    Click "[Function plugin template demo] On This Day in History"
- The dropdown menu in the function plugin area offers more features to choose from.
```

## Installation - Method 2: Using docker (Linux)


1. ChatGPT only (recommended for most people)
``` sh
# Download the project
git clone https://github.com/binary-husky/chatgpt_academic.git
cd chatgpt_academic
# Configure the overseas proxy and the OpenAI API key
Edit config.py with any text editor
# Install
docker build -t gpt-academic .
# Run
docker run --rm -it --net=host gpt-academic

# Test the function plugins
## Test the function plugin template (asks GPT what happened in history on this day); you can use this function as a template to implement more complex features.
Click "[Function plugin template demo] On This Day in History"
## Test the abstract summary for a LaTeX project
In the input area, enter ./crazy_functions/test_project/latex/attention, then click "Read LaTeX paper and summarize the abstract"
## Test the Python project analysis
In the input area, enter ./crazy_functions/test_project/python/dqn, then click "Parse Entire Python Project"

More functions are available in the function plugin dropdown menu.
```

2. ChatGPT + ChatGLM (requires solid docker knowledge and a sufficiently powerful machine)
``` sh
# Edit the dockerfile
cd docs && nano Dockerfile+ChatGLM
# How to build (Dockerfile+ChatGLM is under the docs path, so cd docs first)
docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
# How to run (1) Run directly:
docker run --rm -it --net=host --gpus=all gpt-academic
# How to run (2) Make some adjustments inside the container before launching:
docker run --rm -it --net=host --gpus=all gpt-academic bash
```

## Installation - Method 3: Other deployment options

1. Deployment on a remote cloud server
Please see the [deployment wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)

2. Using WSL2 (Windows Subsystem for Linux)
Please see the [deployment wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)


## Proxy configuration
### Method 1: Conventional method
[Configure the proxy](https://github.com/binary-husky/chatgpt_academic/issues/1)

### Method 2: Step-by-step tutorial for beginners
[Beginner tutorial](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BB%A3%E7%90%86%E8%BD%AF%E4%BB%B6%E9%97%AE%E9%A2%98%E7%9A%84%E6%96%B0%E6%89%8B%E8%A7%A3%E5%86%B3%E6%96%B9%E6%B3%95%EF%BC%88%E6%96%B9%E6%B3%95%E5%8F%AA%E9%80%82%E7%94%A8%E4%BA%8E%E6%96%B0%E6%89%8B%EF%BC%89)


---

## Customizing convenience buttons (customizing academic shortcuts)
Open `core_functional.py` with any text editor, add an entry like the one below, then restart the program. (If the button has already been added successfully and is visible, both the prefix and the suffix can be hot-edited without restarting the program.)
For example:
```
"French-Chinese Translation": {
    # Prefix, added before your input. For example, to describe your request, such as translation, code debugging, polishing, etc.
    "Prefix": "Please translate the content below into Chinese, then explain each technical term mentioned in a Markdown table:\n\n",

    # Suffix, added after your input. Combined with the prefix, it can for example wrap your input in quotation marks.
    "Suffix": "",
},
```

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226899272-477c2134-ed71-4326-810c-29891fe4a508.png" width="500" >
</div>

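To illustrate the mechanism rather than the project's exact internals, the sketch below shows how such a `"Prefix"`/`"Suffix"` pair ends up wrapping whatever you type before it is sent to the model. The function and variable names here are invented for the example and are not taken from the repository.
```python
# Illustrative sketch only: how a Prefix/Suffix entry wraps the user's input.
# `build_prompt` is a hypothetical helper, not a function from this project.
def build_prompt(entry: dict, user_input: str) -> str:
    # The prefix states the task (translate, debug, polish, ...);
    # the suffix can, for example, wrap the input in quotation marks.
    return entry["Prefix"] + user_input + entry["Suffix"]


entry = {
    "Prefix": "Please translate the following content into Chinese:\n\n",
    "Suffix": "",
}
print(build_prompt(entry, "Bonjour tout le monde"))
```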
---


## Overview of some features

### Image display:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/228737599-bf0a9d9c-1808-4f43-ae15-dfcc7af0f295.png" width="800" >
</div>


### When the program can read and break down its own code:

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936850-c77d7183-0749-4c1c-9875-fd4891842d0c.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226936618-9b487e4b-ab5b-4b6e-84c6-16942102e917.png" width="800" >
</div>


### Analysis of any Python/C++ project:
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226935232-6b6a73ce-8900-4aee-93f9-733c7e6fef53.png" width="800" >
</div>

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/226969067-968a27c1-1b9c-486b-8b81-ab2de8d3f88a.png" width="800" >
</div>

### Automatic reading and summarizing of LaTeX papers
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227504406-86ab97cd-f208-41c3-8e4a-7000e51cf980.png" width="800" >
</div>

### Automatic report generation
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/227503770-fe29ce2c-53fd-47b0-b0ff-93805f0c2ff4.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504617-7a497bb3-0a2a-4b50-9a8a-95ae60ea7afd.png" height="300" >
<img src="https://user-images.githubusercontent.com/96192199/227504005-efeaefe0-b687-49d0-bf95-2d7b7e66c348.png" height="300" >
</div>

### Modular function design
<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229288270-093643c1-0018-487a-81e6-1d7809b6e90f.png" height="400" >
<img src="https://user-images.githubusercontent.com/96192199/227504931-19955f78-45cd-4d1c-adac-e71e50957915.png" height="400" >
</div>


### Source code translation into English

<div align="center">
<img src="https://user-images.githubusercontent.com/96192199/229720562-fe6c3508-6142-4635-a83d-21eb3669baee.png" height="400" >
</div>

## To do and version roadmap:
- version 3.2+ (to do): support more function plugin interface parameters
- version 3.1: support querying several GPT models simultaneously! Support API2D, support load balancing across multiple API keys
- version 3.0: support ChatGLM and other small LLMs
- version 2.6: reworked the plugin structure, improved interactivity, added more plugins
- version 2.5: self-updating; fixed token overflow and overly long text when summarizing the full source code of a project
- version 2.4: (1) added full-document PDF translation; (2) added the option to switch the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins.
- version 2.3: improved multi-thread interactivity
- version 2.2: function plugins now support hot reloading
- version 2.1: collapsible layout
- version 2.0: introduced modular function plugins
- version 1.0: basic features

## References and learning

```
The code borrows many designs from other excellent projects, in particular:

# Project 1: many tricks were borrowed from ChuanhuChatGPT
https://github.com/GaiZhenbiao/ChuanhuChatGPT

# Project 2: Tsinghua's ChatGLM-6B:
https://github.com/THUDM/ChatGLM-6B
```

spaces/Amon1/ChatGPTForAcadamic/crazy_functions/test_project/python/dqn/__init__.py
DELETED
@@ -1,2 +0,0 @@
from stable_baselines3.dqn.dqn import DQN
from stable_baselines3.dqn.policies import CnnPolicy, MlpPolicy

spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/datasets/latents_dataset.py
DELETED
@@ -1,15 +0,0 @@
from torch.utils.data import Dataset


class LatentsDataset(Dataset):
    def __init__(self, latents, opts):
        self.latents = latents
        self.opts = opts

    def __len__(self):
        return self.latents.shape[0]

    def __getitem__(self, index):
        return self.latents[index]

spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/__init__.py
DELETED
@@ -1,9 +0,0 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.

# empty

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py
DELETED
@@ -1,125 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from typing import List, Optional, Tuple, Union

import torch

from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDPMPipeline(DiffusionPipeline):
    r"""
    Pipeline for image generation.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Parameters:
        unet ([`UNet2DModel`]):
            A `UNet2DModel` to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 1000,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        The call function to the pipeline for generation.

        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            num_inference_steps (`int`, *optional*, defaults to 1000):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Example:

        ```py
        >>> from diffusers import DDPMPipeline

        >>> # load model and scheduler
        >>> pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")

        >>> # run pipeline in inference (sample random noise and denoise)
        >>> image = pipe().images[0]

        >>> # save image
        >>> image.save("ddpm_generated_image.png")
        ```

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images
        """
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if self.device.type == "mps":
            # randn does not work reproducibly on mps
            image = randn_tensor(image_shape, generator=generator)
            image = image.to(self.device)
        else:
            image = randn_tensor(image_shape, generator=generator, device=self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. compute previous image: x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_prior_emb2emb.py
DELETED
@@ -1,600 +0,0 @@
from typing import List, Optional, Union

import PIL
import torch
from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionModelWithProjection

from ...models import PriorTransformer
from ...schedulers import UnCLIPScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from ..kandinsky import KandinskyPriorPipelineOutput
from ..pipeline_utils import DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorEmb2EmbPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... )
        >>> image_emb, negative_image_emb = pipe_prior(prompt, image=img, strength=0.2).to_tuple()

        >>> pipe = KandinskyV22Pipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
"""

EXAMPLE_INTERPOLATE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22Pipeline
        >>> from diffusers.utils import load_image
        >>> import PIL

        >>> import torch
        >>> from torchvision import transforms

        >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> img1 = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... )

        >>> img2 = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/starry_night.jpeg"
        ... )

        >>> images_texts = ["a cat", img1, img2]
        >>> weights = [0.3, 0.3, 0.4]
        >>> image_emb, zero_image_emb = pipe_prior.interpolate(images_texts, weights)

        >>> pipe = KandinskyV22Pipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=150,
        ... ).images[0]

        >>> image.save("starry_cat.png")
        ```
"""


class KandinskyV22PriorEmb2EmbPipeline(DiffusionPipeline):
    """
    Pipeline for generating image prior for Kandinsky

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        prior ([`PriorTransformer`]):
            The canonical unCLIP prior to approximate the image embedding from the text embedding.
        image_encoder ([`CLIPVisionModelWithProjection`]):
            Frozen image-encoder.
        text_encoder ([`CLIPTextModelWithProjection`]):
            Frozen text-encoder.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        scheduler ([`UnCLIPScheduler`]):
            A scheduler to be used in combination with `prior` to generate image embedding.
    """

    _exclude_from_cpu_offload = ["prior"]

    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModelWithProjection,
        text_encoder: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        scheduler: UnCLIPScheduler,
        image_processor: CLIPImageProcessor,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            image_encoder=image_encoder,
            image_processor=image_processor,
        )

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_INTERPOLATE_DOC_STRING)
    def interpolate(
        self,
        images_and_prompts: List[Union[str, PIL.Image.Image, torch.FloatTensor]],
        weights: List[float],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        negative_prior_prompt: Optional[str] = None,
        negative_prompt: str = "",
        guidance_scale: float = 4.0,
        device=None,
    ):
        """
        Function invoked when using the prior pipeline for interpolation.

        Args:
            images_and_prompts (`List[Union[str, PIL.Image.Image, torch.FloatTensor]]`):
                list of prompts and images to guide the image generation.
            weights: (`List[float]`):
                list of weights for each condition in `images_and_prompts`
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            negative_prior_prompt (`str`, *optional*):
                The prompt not to guide the prior diffusion process. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt not to guide the image generation. Ignored when not using guidance (i.e., ignored if
                `guidance_scale` is less than `1`).
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.

        Examples:

        Returns:
            [`KandinskyPriorPipelineOutput`] or `tuple`
        """

        device = device or self.device

        if len(images_and_prompts) != len(weights):
            raise ValueError(
                f"`images_and_prompts` contains {len(images_and_prompts)} items and `weights` contains {len(weights)} items - they should be lists of same length"
            )

        image_embeddings = []
        for cond, weight in zip(images_and_prompts, weights):
            if isinstance(cond, str):
                image_emb = self(
                    cond,
                    num_inference_steps=num_inference_steps,
                    num_images_per_prompt=num_images_per_prompt,
                    generator=generator,
                    latents=latents,
                    negative_prompt=negative_prior_prompt,
                    guidance_scale=guidance_scale,
                ).image_embeds.unsqueeze(0)

            elif isinstance(cond, (PIL.Image.Image, torch.Tensor)):
                image_emb = self._encode_image(
                    cond, device=device, num_images_per_prompt=num_images_per_prompt
                ).unsqueeze(0)

            else:
                raise ValueError(
                    f"`images_and_prompts` can only contains elements to be of type `str`, `PIL.Image.Image` or `torch.Tensor` but is {type(cond)}"
                )

            image_embeddings.append(image_emb * weight)

        image_emb = torch.cat(image_embeddings).sum(dim=0)

        return KandinskyPriorPipelineOutput(image_embeds=image_emb, negative_image_embeds=torch.randn_like(image_emb))

    def _encode_image(
        self,
        image: Union[torch.Tensor, List[PIL.Image.Image]],
        device,
        num_images_per_prompt,
    ):
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values.to(
                dtype=self.image_encoder.dtype, device=device
            )

        image_emb = self.image_encoder(image)["image_embeds"]  # B, D
        image_emb = image_emb.repeat_interleave(num_images_per_prompt, dim=0)
        image_emb.to(device=device)

        return image_emb

    def prepare_latents(self, emb, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        emb = emb.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        init_latents = emb

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline.get_zero_embed
    def get_zero_embed(self, batch_size=1, device=None):
        device = device or self.device
        zero_img = torch.zeros(1, 3, self.image_encoder.config.image_size, self.image_encoder.config.image_size).to(
            device=device, dtype=self.image_encoder.dtype
        )
        zero_image_emb = self.image_encoder(zero_img)["image_embeds"]
        zero_image_emb = zero_image_emb.repeat(batch_size, 1)
        return zero_image_emb

    # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_prior.KandinskyPriorPipeline._encode_prompt
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
    ):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        text_mask = text_inputs.attention_mask.bool().to(device)

        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        text_encoder_output = self.text_encoder(text_input_ids.to(device))

        prompt_embeds = text_encoder_output.text_embeds
        text_encoder_hidden_states = text_encoder_output.last_hidden_state

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_text_mask = uncond_input.attention_mask.bool().to(device)
            negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))

            negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds
            uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method

            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])

            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.prior]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.prior_hook = hook

        _, hook = cpu_offload_with_hook(self.image_encoder, device, prev_module_hook=self.prior_hook)

        self.final_offload_hook = hook

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.Tensor, List[torch.Tensor], PIL.Image.Image, List[PIL.Image.Image]],
        strength: float = 0.3,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        output_type: Optional[str] = "pt",  # pt only
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `emb`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added.
            emb (`torch.FloatTensor`):
                The image embedding.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            output_type (`str`, *optional*, defaults to `"pt"`):
                The output format of the generated image. Choose between: `"np"` (`np.array`) or `"pt"`
                (`torch.Tensor`).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`KandinskyPriorPipelineOutput`] or `tuple`
        """

        if isinstance(prompt, str):
            prompt = [prompt]
        elif not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if isinstance(negative_prompt, str):
            negative_prompt = [negative_prompt]
        elif not isinstance(negative_prompt, list) and negative_prompt is not None:
            raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}")

        # if the negative prompt is defined we double the batch size to
        # directly retrieve the negative prompt embedding
        if negative_prompt is not None:
            prompt = prompt + negative_prompt
            negative_prompt = 2 * negative_prompt

        device = self._execution_device

        batch_size = len(prompt)
        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if not isinstance(image, List):
            image = [image]

        if isinstance(image[0], torch.Tensor):
            image = torch.cat(image, dim=0)

        if isinstance(image, torch.Tensor) and image.ndim == 2:
            # allow user to pass image_embeds directly
            image_embeds = image.repeat_interleave(num_images_per_prompt, dim=0)
        elif isinstance(image, torch.Tensor) and image.ndim != 4:
            raise ValueError(
                f" if pass `image` as pytorch tensor, or a list of pytorch tensor, please make sure each tensor has shape [batch_size, channels, height, width], currently {image[0].unsqueeze(0).shape}"
            )
        else:
            image_embeds = self._encode_image(image, device, num_images_per_prompt)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)

        latents = image_embeds
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        latents = self.prepare_latents(
            latents,
            latent_timestep,
            batch_size // num_images_per_prompt,
            num_images_per_prompt,
            prompt_embeds.dtype,
            device,
            generator,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            predicted_image_embedding = self.prior(
                latent_model_input,
                timestep=t,
                proj_embedding=prompt_embeds,
                encoder_hidden_states=text_encoder_hidden_states,
                attention_mask=text_mask,
            ).predicted_image_embedding

            if do_classifier_free_guidance:
                predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
                predicted_image_embedding = predicted_image_embedding_uncond + guidance_scale * (
                    predicted_image_embedding_text - predicted_image_embedding_uncond
                )

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            latents = self.scheduler.step(
                predicted_image_embedding,
                timestep=t,
                sample=latents,
                generator=generator,
                prev_timestep=prev_timestep,
            ).prev_sample

        latents = self.prior.post_process_latents(latents)

        image_embeddings = latents

        # if a negative prompt has been defined, split the image embedding into its two halves
        if negative_prompt is None:
            zero_embeds = self.get_zero_embed(latents.shape[0], device=latents.device)
            if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
                self.final_offload_hook.offload()
        else:
            image_embeddings, zero_embeds = image_embeddings.chunk(2)
            if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
                self.prior_hook.offload()

        if output_type not in ["pt", "np"]:
            raise ValueError(f"Only the output types `pt` and `np` are supported not output_type={output_type}")

        if output_type == "np":
            image_embeddings = image_embeddings.cpu().numpy()
            zero_embeds = zero_embeds.cpu().numpy()

        if not return_dict:
            return (image_embeddings, zero_embeds)

        return KandinskyPriorPipelineOutput(image_embeds=image_embeddings, negative_image_embeds=zero_embeds)

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_xl/__init__.py
DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/text_to_video/test_video_to_video.py
DELETED
@@ -1,195 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    # (todo): sayakpaul
    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2

|
spaces/Andy1621/uniformer_image_detection/configs/double_heads/README.md
DELETED
@@ -1,22 +0,0 @@
# Rethinking Classification and Localization for Object Detection

## Introduction

[ALGORITHM]

```latex
@article{wu2019rethinking,
    title={Rethinking Classification and Localization for Object Detection},
    author={Yue Wu and Yinpeng Chen and Lu Yuan and Zicheng Liu and Lijuan Wang and Hongzhi Li and Yun Fu},
    year={2019},
    eprint={1904.06493},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```

## Results and models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :------: | :--------: |
| R-50-FPN | pytorch | 1x | 6.8 | 9.5 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130_220238.log.json) |
spaces/Andy1621/uniformer_image_detection/configs/hrnet/mask_rcnn_hrnetv2p_w18_2x_coco.py
DELETED
@@ -1,4 +0,0 @@
_base_ = './mask_rcnn_hrnetv2p_w18_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
spaces/Ani1712full/Estimacion_tasa_morosidad/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Estimacion_tasa_morosidad
emoji: 📚
colorFrom: red
colorTo: purple
sdk: gradio
sdk_version: 3.0.11
app_file: app.py
pinned: false
license: cc-by-4.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/engine/__init__.py
DELETED
@@ -1,8 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test,
                   single_gpu_test)

__all__ = [
    'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test',
    'single_gpu_test'
]
spaces/Audio-AGI/AudioSep/models/CLAP/training/data.py
DELETED
@@ -1,975 +0,0 @@
[975 lines of Python removed: the CLAP training data pipeline. It defined a RobertaTokenizer-based tokenizer(), AudioSet label-map loading, int16_to_float32()/float32_to_int16(), the ToyDataset and CsvDataset classes, the DataInfo dataclass, get_dataset_size(), get_imagenet(), count_samples(), webdataset helpers (filter_no_caption, log_and_continue, sample_prop), mel-spectrogram feature extraction (get_mel, and get_audio_features with rand_trunc/fusion truncation and pad/repeatpad/repeat filling), the preprocess() and collate_fn() functions, and the dataset factories get_wds_dataset(), wds_batch_list2dict(), get_csv_dataset(), get_toy_dataset(), get_dataset_fn(), and get_data().]
spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/text/text_decoder.py
DELETED
@@ -1,672 +0,0 @@
|
|
1 |
-
# Modified by Jialian Wu from
|
2 |
-
# https://github.com/microsoft/GenerativeImage2Text/blob/main/generativeimage2text/layers/decoder.py
|
3 |
-
# and https://github.com/kdexd/virtex
|
4 |
-
from torch import nn
|
5 |
-
import torch
|
6 |
-
import functools
|
7 |
-
from torch.nn import functional as F
|
8 |
-
import warnings
|
9 |
-
|
10 |
-
|
11 |
-
class TextualHead(nn.Module):
|
12 |
-
def __init__(self,
|
13 |
-
visual_feature_size: int, vocab_size: int, hidden_size: int):
|
14 |
-
super().__init__()
|
15 |
-
self.visual_feature_size = visual_feature_size
|
16 |
-
self.vocab_size = vocab_size
|
17 |
-
self.hidden_size = hidden_size
|
18 |
-
|
19 |
-
@property
|
20 |
-
def textual_feature_size(self):
|
21 |
-
return self.hidden_size
|
22 |
-
|
23 |
-
|
24 |
-
class WordAndPositionalEmbedding(nn.Module):
|
25 |
-
def __init__(
|
26 |
-
self,
|
27 |
-
vocab_size: int,
|
28 |
-
hidden_size: int,
|
29 |
-
dropout: float = 0.0,
|
30 |
-
max_caption_length: int = 30,
|
31 |
-
padding_idx: int = 0,
|
32 |
-
):
|
33 |
-
super().__init__()
|
34 |
-
self.vocab_size = vocab_size
|
35 |
-
self.padding_idx = padding_idx
|
36 |
-
|
37 |
-
#self.words = nn.Embedding(vocab_size, hidden_size, padding_idx=padding_idx)
|
38 |
-
self.words = nn.Embedding(vocab_size, hidden_size)
|
39 |
-
|
40 |
-
# We provide no "padding index" for positional embeddings. We zero out
|
41 |
-
# the positional embeddings of padded positions as a post-processing.
|
42 |
-
self.positions = nn.Embedding(max_caption_length, hidden_size)
|
43 |
-
self.layer_norm = nn.LayerNorm(
|
44 |
-
hidden_size, eps=1e-8, elementwise_affine=True
|
45 |
-
)
|
46 |
-
self.dropout = nn.Dropout(p=dropout)
|
47 |
-
|
48 |
-
def forward(self, tokens: torch.Tensor):
|
49 |
-
position_indices = self._create_position_indices(tokens)
|
50 |
-
|
51 |
-
# shape: (batch_size, max_caption_length, hidden_size)
|
52 |
-
word_embeddings = self.words(tokens)
|
53 |
-
position_embeddings = self.positions(position_indices)
|
54 |
-
|
55 |
-
# shape: (batch_size, max_caption_length, hidden_size)
|
56 |
-
embeddings = self.layer_norm(word_embeddings + position_embeddings)
|
57 |
-
embeddings = self.dropout(embeddings)
|
58 |
-
|
59 |
-
return embeddings
|
60 |
-
|
61 |
-
@functools.lru_cache(maxsize=128)
|
62 |
-
def _create_position_indices(self, tokens: torch.Tensor):
|
63 |
-
|
64 |
-
# Create position indices of the same size as token indices.
|
65 |
-
batch_size, max_caption_length = tokens.size()
|
66 |
-
positions = torch.arange(
|
67 |
-
max_caption_length, dtype=tokens.dtype, device=tokens.device
|
68 |
-
)
|
69 |
-
# shape: (batch_size, max_caption_length)
|
70 |
-
positions = positions.unsqueeze(0).expand(batch_size, max_caption_length)
|
71 |
-
return positions
|
72 |
-
|
73 |
-
|
74 |
-
class BertEncoderAsDecoder(nn.Module):
|
75 |
-
def __init__(self, encoder):
|
76 |
-
super().__init__()
|
77 |
-
self.encoder = encoder
|
78 |
-
|
79 |
-
def forward(self, tgt, memory,
|
80 |
-
tgt_mask=None,
|
81 |
-
tgt_key_padding_mask=None,
|
82 |
-
memory_key_padding_mask=None,
|
83 |
-
tgt_bi_valid_mask=None,
|
84 |
-
encoder_history_states=None,
|
85 |
-
):
|
86 |
-
assert tgt_key_padding_mask is None, 'not supported'
|
87 |
-
assert tgt_mask.dim() == 2
|
88 |
-
assert tgt_mask.shape[0] == tgt_mask.shape[1]
|
89 |
-
# tgt_mask should always be 0/negative infinity
|
90 |
-
tgt = tgt.transpose(0, 1)
|
91 |
-
memory = memory.transpose(0, 1)
|
92 |
-
|
93 |
-
hidden_states = torch.cat((memory, tgt), dim=1)
|
94 |
-
num_tgt = tgt.shape[1]
|
95 |
-
num_memory = memory.shape[1]
|
96 |
-
device = tgt.device
|
97 |
-
dtype = tgt.dtype
|
98 |
-
top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
|
99 |
-
top_right = torch.full((num_memory, num_tgt), float('-inf'), device=tgt.device, dtype=dtype,)
|
100 |
-
bottom_left = torch.zeros((num_tgt, num_memory), dtype=dtype, device=tgt_mask.device,)
|
101 |
-
left = torch.cat((top_left, bottom_left), dim=0)
|
102 |
-
right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
|
103 |
-
|
104 |
-
full_attention_mask = torch.cat((left, right), dim=1)[None, :]
|
105 |
-
|
106 |
-
if memory_key_padding_mask is None:
|
107 |
-
memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
|
108 |
-
# if it is False, it means valid. That is, it is not a padding
|
109 |
-
assert memory_key_padding_mask.dtype == torch.bool
|
110 |
-
zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
|
111 |
-
zero_negative_infinity[memory_key_padding_mask] = float('-inf')
|
112 |
-
full_attention_mask = full_attention_mask.expand((memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + num_tgt))
|
113 |
-
full_attention_mask = full_attention_mask.clone()
|
114 |
-
origin_left = full_attention_mask[:, :, :num_memory]
|
115 |
-
update = zero_negative_infinity[:, None, :]
|
116 |
-
full_attention_mask[:, :, :num_memory] = origin_left + update
|
117 |
-
|
118 |
-
if tgt_bi_valid_mask is not None:
|
119 |
-
# verify the correctness
|
120 |
-
bs = full_attention_mask.shape[0]
|
121 |
-
# during inference, tgt_bi_valid_mask's length is not changed, but
|
122 |
-
# num_tgt can be increased
|
123 |
-
max_valid_target = tgt_bi_valid_mask.shape[1]
|
124 |
-
mask = tgt_bi_valid_mask[:, None, :].expand((bs, num_memory+num_tgt, max_valid_target))
|
125 |
-
full_attention_mask[:, :, num_memory:(num_memory+max_valid_target)][mask] = 0
|
126 |
-
|
127 |
-
# add axis for multi-head
|
128 |
-
full_attention_mask = full_attention_mask[:, None, :, :]
|
129 |
-
|
130 |
-
if encoder_history_states is None:
|
131 |
-
result = self.encoder(
|
132 |
-
hidden_states=hidden_states,
|
133 |
-
attention_mask=full_attention_mask,
|
134 |
-
encoder_history_states=encoder_history_states,
|
135 |
-
)
|
136 |
-
result = list(result)
|
137 |
-
result[0] = result[0][:, num_memory:].transpose(0, 1)
|
138 |
-
if self.encoder.output_hidden_states:
|
139 |
-
return result[0], result[1]
|
140 |
-
else:
|
141 |
-
# make it back-compatible
|
142 |
-
return result[0]
|
143 |
-
else:
|
144 |
-
encoder_out = self.encoder(
|
145 |
-
hidden_states=hidden_states[:, -1:],
|
146 |
-
attention_mask=full_attention_mask[:, :, -1:],
|
147 |
-
encoder_history_states=encoder_history_states,
|
148 |
-
)
|
149 |
-
result = encoder_out[0].transpose(0, 1)
|
150 |
-
if self.encoder.output_hidden_states:
|
151 |
-
return result, encoder_out[1]
|
152 |
-
else:
|
153 |
-
return result
|
154 |
-
|
155 |
-
|
156 |
-
def create_transformer(decoder_type, norm_type,
|
157 |
-
textual_feature_size,
|
158 |
-
attention_heads,
|
159 |
-
feedforward_size,
|
160 |
-
dropout,
|
161 |
-
num_layers,
|
162 |
-
output_hidden_states=False,
|
163 |
-
use_mlp_wrapper=None,
|
164 |
-
use_act_checkpoint=True,
|
165 |
-
):
|
166 |
-
assert norm_type in ['post', 'pre']
|
167 |
-
if decoder_type is None:
|
168 |
-
LayerClass = (
|
169 |
-
nn.TransformerDecoderLayer
|
170 |
-
if norm_type == "post"
|
171 |
-
else PreNormTransformerDecoderLayer
|
172 |
-
)
|
173 |
-
_layer = LayerClass(
|
174 |
-
textual_feature_size,
|
175 |
-
attention_heads,
|
176 |
-
dim_feedforward=feedforward_size,
|
177 |
-
dropout=dropout,
|
178 |
-
activation="gelu",
|
179 |
-
)
|
180 |
-
return nn.TransformerDecoder(_layer, num_layers)
|
181 |
-
elif decoder_type == 'bert_en':
|
182 |
-
from .modeling_bert import BertConfig, BertEncoder
|
183 |
-
config = BertConfig(
|
184 |
-
vocab_size_or_config_json_file=30522,
|
185 |
-
hidden_size=textual_feature_size,
|
186 |
-
num_hidden_layers=num_layers,
|
187 |
-
num_attention_heads=attention_heads,
|
188 |
-
intermediate_size=feedforward_size,
|
189 |
-
hidden_act="gelu",
|
190 |
-
hidden_dropout_prob=0.1,
|
191 |
-
attention_probs_dropout_prob=0.1,
|
192 |
-
layer_norm_eps=1e-12,
|
193 |
-
)
|
194 |
-
config.pre_norm = (norm_type == 'pre')
|
195 |
-
config.use_mlp_wrapper = use_mlp_wrapper
|
196 |
-
config.output_hidden_states = output_hidden_states
|
197 |
-
encoder = BertEncoder(config, use_act_checkpoint=use_act_checkpoint)
|
198 |
-
return BertEncoderAsDecoder(encoder)
|
199 |
-
|
200 |
-
|
201 |
-
class PreNormTransformerDecoderLayer(nn.TransformerDecoderLayer):
|
202 |
-
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
|
203 |
-
tgt_key_padding_mask=None, memory_key_padding_mask=None):
|
204 |
-
# fmt: off
|
205 |
-
# We use the members (modules) from super-class, just the order of
|
206 |
-
# operations is changed here. First layernorm, then attention.
|
207 |
-
tgt2 = self.norm1(tgt)
|
208 |
-
tgt2, _ = self.self_attn(
|
209 |
-
tgt2, tgt2, tgt2, attn_mask=tgt_mask,
|
210 |
-
key_padding_mask=tgt_key_padding_mask
|
211 |
-
)
|
212 |
-
tgt = tgt + self.dropout1(tgt2)
|
213 |
-
|
214 |
-
# Layernorm first, then decoder attention.
|
215 |
-
tgt2 = self.norm2(tgt)
|
216 |
-
tgt2, _ = self.multihead_attn(
|
217 |
-
tgt2, memory, memory, attn_mask=memory_mask,
|
218 |
-
key_padding_mask=memory_key_padding_mask
|
219 |
-
)
|
220 |
-
tgt = tgt + self.dropout2(tgt2)
|
221 |
-
|
222 |
-
# Layernorm first, then transformation through feedforward network.
|
223 |
-
tgt2 = self.norm3(tgt)
|
224 |
-
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
|
225 |
-
tgt = tgt + self.dropout3(tgt2)
|
226 |
-
return tgt
|
227 |
-
|
228 |
-
|
-class TransformerDecoderTextualHead(TextualHead):
-    def __init__(
-        self,
-        object_feature_size: int,
-        vocab_size: int,
-        hidden_size: int,
-        num_layers: int,
-        attention_heads: int,
-        feedforward_size: int,
-        dropout: float = 0.1,
-        norm_type: str = "post",
-        mask_future_positions: bool = True,
-        max_caption_length: int = 1024,
-        padding_idx: int = 0,
-        decoder_type=None,
-        not_tie_weight=None,
-        output_hidden_states=None,
-        use_mlp_wrapper=None,
-        use_act_checkpoint=True,
-    ):
-        super().__init__(object_feature_size, vocab_size, hidden_size)
-        self.num_layers = num_layers
-        self.attention_heads = attention_heads
-        self.feedforward_size = feedforward_size
-        self.dropout = dropout
-        assert mask_future_positions
-        self.padding_idx = padding_idx
-
-        self.object_feature_projection = nn.Sequential(
-            nn.Linear(object_feature_size, self.textual_feature_size),
-            nn.LayerNorm(self.textual_feature_size))
-
-        self.embedding = WordAndPositionalEmbedding(
-            self.vocab_size,
-            self.textual_feature_size,
-            dropout=dropout,
-            max_caption_length=max_caption_length,
-            padding_idx=padding_idx,
-        )
-        self.transformer = create_transformer(
-            decoder_type=decoder_type,
-            norm_type=norm_type,
-            textual_feature_size=self.textual_feature_size,
-            attention_heads=self.attention_heads,
-            feedforward_size=self.feedforward_size,
-            dropout=dropout,
-            num_layers=self.num_layers,
-            output_hidden_states=output_hidden_states,
-            use_mlp_wrapper=use_mlp_wrapper,
-            use_act_checkpoint=use_act_checkpoint,
-        )
-        self.apply(self._init_weights)
-
-        # Create an output linear layer and tie the input and output word
-        # embeddings to reduce parameters.
-        self.output = nn.Linear(self.textual_feature_size, vocab_size)
-        if not not_tie_weight:
-            self.output.weight = self.embedding.words.weight
-
-    @staticmethod
-    def _init_weights(module):
-        """Initialize weights like BERT - N(0.0, 0.02), bias = 0."""
-
-        if isinstance(module, nn.Linear):
-            module.weight.data.normal_(mean=0.0, std=0.02)
-        elif isinstance(module, nn.MultiheadAttention):
-            module.in_proj_weight.data.normal_(mean=0.0, std=0.02)
-            module.out_proj.weight.data.normal_(mean=0.0, std=0.02)
-        elif isinstance(module, nn.Embedding):
-            module.weight.data.normal_(mean=0.0, std=0.02)
-            if module.padding_idx is not None:
-                module.weight.data[module.padding_idx].zero_()
-
-    def forward(
-        self,
-        hidden_states,
-        text_tokens,
-    ):
-        projected_object_features = self.object_feature_projection(hidden_states) if hidden_states is not None else None
-        batch_size, max_text_length = text_tokens.size()
-        text_embeddings = self.embedding(text_tokens)
-
-        # An additive mask for masking the future (one direction).
-        uni_mask_zero_neg = self._generate_future_mask(
-            max_text_length, text_embeddings.dtype, text_embeddings.device
-        )
-
-        # We transpose the first two dimensions of token embeddings and visual
-        # features, as required by the decoder.
-        text_embeddings = text_embeddings.transpose(0, 1)
-
-        projected_object_features = projected_object_features.transpose(0, 1)
-
-        # If the transformer here is the stock pytorch decoder, the
-        # output is always a plain tensor.
-        trans_out = self.transformer(
-            text_embeddings,
-            projected_object_features,
-            tgt_mask=uni_mask_zero_neg,
-        )
-        if isinstance(trans_out, tuple):
-            textual_features = trans_out[0]
-        else:
-            assert isinstance(trans_out, torch.Tensor)
-            textual_features = trans_out
-        # Undo the transpose and bring batch to dim 0.
-        # shape: (batch_size, max_caption_length, hidden_size)
-        textual_features = textual_features.transpose(0, 1)
-
-        # shape: (batch_size, max_caption_length, vocab_size)
-        output_logits = self.output(textual_features)
-        if isinstance(trans_out, tuple):
-            return output_logits, trans_out[1]
-        else:
-            return output_logits
-
-    def _generate_future_mask(
-        self, size: int, dtype: torch.dtype, device: torch.device
-    ):
-        # Default mask is for forward direction. Flip for backward direction.
-        mask = torch.triu(
-            torch.ones(size, size, device=device, dtype=dtype), diagonal=1
-        )
-        mask = mask.masked_fill(mask == 1, float("-inf"))
-        return mask
-
-
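Illustration only (not from the diff): the additive causal mask `_generate_future_mask` builds for size=4 -- zeros on and below the diagonal, -inf above it, so position i cannot attend to later positions.

import torch

size = 4
mask = torch.triu(torch.ones(size, size), diagonal=1)
mask = mask.masked_fill(mask == 1, float("-inf"))
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])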
-class AutoRegressiveBeamSearch(object):
-    def __init__(
-        self,
-        end_token_id: int,
-        max_steps: int = 50,
-        beam_size: int = 5,
-        objectdet=True,
-        per_node_beam_size: int = 2,
-    ):
-        self._eos_index = end_token_id
-        self.max_steps = max_steps
-        self.beam_size = beam_size
-        self.objectdet = objectdet
-        self.per_node_beam_size = per_node_beam_size or beam_size
-
-    def search(self, begin_tokens, step):
-        if self.beam_size > 1 and self.objectdet:
-            only_return_best = False
-        else:
-            only_return_best = True
-
-        batch_size = begin_tokens.size()[0]
-
-        predictions = begin_tokens.unsqueeze(1).expand((batch_size, self.beam_size, begin_tokens.shape[-1]))
-        # Calculate the first timestep. This is done outside the main loop
-        # because we are going from a single decoder input (the output from the
-        # encoder) to the top `beam_size` decoder outputs. On the other hand,
-        # within the main loop we are going from the `beam_size` elements of the
-        # beam to `beam_size`^2 candidates from which we will select the top
-        # `beam_size` elements for the next iteration.
-        # shape: (batch_size, num_classes)
-        start_class_logits = step(begin_tokens)
-
-        # Convert logits to logprobs.
-        # shape: (batch_size * beam_size, vocab_size)
-        start_class_logprobs = F.log_softmax(start_class_logits, dim=1)
-
-        num_classes = start_class_logprobs.size()[1]
-
-        # shape: (batch_size, beam_size), (batch_size, beam_size)
-        start_top_logprobs, start_predicted_classes = start_class_logprobs.topk(
-            self.beam_size
-        )
-
-        if (
-            self.beam_size == 1
-            and (start_predicted_classes == self._eos_index).all()
-        ):
-            warnings.warn(
-                "Empty object description predicted. You may want to increase beam "
-                "size or ensure your step function is working properly.",
-                RuntimeWarning,
-            )
-            if only_return_best:
-                return start_predicted_classes, start_top_logprobs
-            else:
-                return start_predicted_classes.unsqueeze(-1), start_top_logprobs
-
-        # The log probs for the last time step.
-        # shape: (batch_size, beam_size)
-        last_logprobs = start_top_logprobs
-
-        # shape: (batch_size, beam_size, sequence_length)
-        predictions = torch.cat([predictions, start_predicted_classes.unsqueeze(-1)], dim=-1)
-
-        # Log probability tensor that mandates that the end token is selected.
-        # shape: (batch_size * beam_size, num_classes)
-        logprobs_after_end = start_class_logprobs.new_full(
-            (batch_size * self.beam_size, num_classes), float("-inf")
-        )
-        logprobs_after_end[:, self._eos_index] = 0.0
-
-        logits_after_end = start_class_logprobs.new_full(
-            (batch_size * self.beam_size, num_classes), float("-inf")
-        )
-        logits_after_end[:, self._eos_index] = 0
-
-        while predictions.shape[-1] < self.max_steps:
-            # shape: (batch_size * beam_size,)
-            last_predictions = predictions[:, :, -1].reshape(batch_size * self.beam_size)
-
-            # If every predicted token from the last step is `self._eos_index`,
-            # then we can stop early.
-            if (last_predictions == self._eos_index).all():
-                break
-
-            predictions_so_far = predictions.view(
-                batch_size * self.beam_size, -1
-            )
-            # shape: (batch_size * beam_size, num_classes)
-            class_logits = step(predictions_so_far)
-
-            # Set logprobs of last predicted tokens as high negative value to avoid
-            # repetition in description.
-            class_logits = class_logits.scatter(1, predictions_so_far[:, -1].view((-1, 1)), -10000)
-
-            # shape: (batch_size * beam_size, num_classes)
-            last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
-                batch_size * self.beam_size, num_classes
-            )
-
-            # Here we are finding any beams where we predicted the end token in
-            # the previous timestep and replacing the distribution with a
-            # one-hot distribution, forcing the beam to predict the end token
-            # this timestep as well.
-            class_logits = torch.where(
-                last_predictions_expanded == self._eos_index,
-                logits_after_end,
-                class_logits,
-            )
-
-            # Convert logits to logprobs.
-            # shape: (batch_size * beam_size, vocab_size)
-            class_logprobs = F.log_softmax(class_logits, dim=1)
-
-            # shape (both): (batch_size * beam_size, per_node_beam_size)
-            top_logprobs, predicted_classes = class_logprobs.topk(
-                self.per_node_beam_size
-            )
-
-            # Here we expand the last log probs to `(batch_size * beam_size,
-            # per_node_beam_size)` so that we can add them to the current log
-            # probs for this timestep. This lets us maintain the log
-            # probability of each element on the beam.
-            # shape: (batch_size * beam_size, per_node_beam_size)
-            expanded_last_logprobs = (
-                last_logprobs.unsqueeze(2)
-                .expand(batch_size, self.beam_size, self.per_node_beam_size)
-                .reshape(batch_size * self.beam_size, self.per_node_beam_size)
-            )
-            # shape: (batch_size * beam_size, per_node_beam_size)
-            summed_top_logprobs = top_logprobs + expanded_last_logprobs
-
-            # shape: (batch_size, beam_size * per_node_beam_size)
-            reshaped_summed = summed_top_logprobs.reshape(
-                batch_size, self.beam_size * self.per_node_beam_size
-            )
-            # shape: (batch_size, beam_size * per_node_beam_size)
-            reshaped_predicted_classes = predicted_classes.reshape(
-                batch_size, self.beam_size * self.per_node_beam_size
-            )
-            # Append the predictions to the current beam.
-            reshaped_beam = (
-                predictions.view(batch_size * self.beam_size, 1, -1)
-                .repeat(1, self.per_node_beam_size, 1)
-                .reshape(batch_size, self.beam_size * self.per_node_beam_size, -1)
-            )
-            # batch_size, (beam_size * per_node_beam_size), #token
-            reshaped_beam = torch.cat([reshaped_beam, reshaped_predicted_classes.unsqueeze(-1)], dim=-1)
-
-            # Keep only the top `beam_size` beam indices.
-            # shape: (batch_size, beam_size), (batch_size, beam_size)
-            restricted_beam_logprobs, restricted_beam_indices = reshaped_summed.topk(
-                self.beam_size
-            )
-            predictions = reshaped_beam.gather(
-                1, restricted_beam_indices.unsqueeze(-1).repeat(1, 1, reshaped_beam.shape[-1])
-            )
-
-            # shape: (batch_size, beam_size)
-            last_logprobs = restricted_beam_logprobs
-
-        if not torch.isfinite(last_logprobs).all():
-            warnings.warn(
-                "Infinite log probs encountered. Some final descriptions may not "
-                "make sense. This can happen when the beam size is larger than"
-                " the number of valid (non-zero probability) transitions that "
-                "the step function produces.",
-                RuntimeWarning,
-            )
-
-        # Optionally select best beam and its logprobs.
-        if only_return_best:
-            # shape: (batch_size, sequence_length)
-            predictions = predictions[:, 0, :]
-            last_logprobs = last_logprobs[:, 0]
-        num_valid = (predictions != self._eos_index).sum(dim=-1)
-        num_valid += (predictions == self._eos_index).sum(dim=-1) > 0
-        num_valid = num_valid - begin_tokens.shape[1]
-        num_valid = num_valid.clip(min=1)
-
-        last_logprobs = last_logprobs / num_valid
-
-        return predictions, last_logprobs
-
-
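A minimal driving sketch (not from the diff) for `AutoRegressiveBeamSearch.search`, assuming the class above is in scope: `step` maps the tokens decoded so far to next-token logits, and the toy step function below is a stand-in purely for illustration.

import torch

vocab_size, eos_id, begin_id = 10, 2, 1

def toy_step(partial_tokens):
    # (batch [* beam], length) -> (batch [* beam], vocab_size) next-token logits
    return torch.randn(partial_tokens.shape[0], vocab_size)

searcher = AutoRegressiveBeamSearch(end_token_id=eos_id, max_steps=8, beam_size=3)
begin_tokens = torch.full((2, 1), begin_id, dtype=torch.long)  # batch of 2 prompts
tokens, logprobs = searcher.search(begin_tokens, toy_step)
# tokens: (2, 3, up to max_steps) token ids; logprobs: (2, 3) length-normalized scores
print(tokens.shape, logprobs.shape)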
-class GRiTTextDecoder(nn.Module):
-    def __init__(
-        self,
-        transformer,
-        begin_token_id=101,
-        beamsearch_decode=None,
-        loss_type=None,
-        tokenizer=None,
-    ):
-        super().__init__()
-        self.textual = transformer
-        self.padding_idx = self.textual.padding_idx
-
-        self.begin_token_id = begin_token_id
-        self.beamsearch_decode = beamsearch_decode
-        self.tokenizer = tokenizer
-
-        if loss_type is None:
-            self.loss = nn.CrossEntropyLoss(ignore_index=self.padding_idx)
-        elif loss_type == 'smooth':
-            self.loss = SmoothLabelCrossEntropyLoss(ignore_index=self.padding_idx)
-        else:
-            raise NotImplementedError(loss_type)
-
-    def forward(self, batch):
-        object_features = batch['object_features']
-
-        if self.training:
-            caption_token_input = batch["text_tokens"]
-
-            output_logits = self.textual(
-                object_features,
-                caption_token_input,
-            )
-
-            if 'need_predict' in batch:
-                # An in-place update would also work, but we clone for safety
-                # since the tensor may be reused in prediction results later.
-                target = batch["text_tokens"].clone()
-                target[batch['need_predict'] == 0] = self.padding_idx
-            else:
-                target = batch["text_tokens"]
-
-            feat = output_logits[:, :-1].contiguous()
-            target = target[:, 1:].contiguous()
-            feat = feat.view(-1, self.textual.vocab_size)
-            target = target.view(-1)
-
-            valid_mask = target != self.padding_idx
-            target = target[valid_mask]
-            feat = feat[valid_mask]
-            loss = self.loss(feat, target)
-
-            return loss
-        else:
-            output_dict = self.infer(object_features)
-            return output_dict
-
-    def infer(self, object_features):
-        batch_size = object_features.size(0)
-        begin_tokens = object_features.new_full(
-            (batch_size, 1), self.begin_token_id
-        ).long()
-
-        decoding_step = functools.partial(
-            self.decoding_step, object_features
-        )
-
-        object_description_tokens, logprobs = self.beamsearch_decode.search(
-            begin_tokens, decoding_step
-        )
-
-        output_dict = {
-            'predictions': object_description_tokens,
-            'logprobs': logprobs,
-        }
-
-        return output_dict
-
-    def decoding_step(self, object_features, partial_text):
-        batch_size = object_features.shape[0]
-        beam_size = int(partial_text.size(0) / batch_size)
-        if beam_size > 1:
-            batch_size, num_token, channels = object_features.size()
-            object_features = object_features.unsqueeze(1).repeat(1, beam_size, 1, 1)
-            object_features = object_features.view(
-                batch_size * beam_size, num_token, channels
-            )
-
-        text_lengths = torch.ones_like(partial_text)
-        if len(text_lengths.size()) != 2:
-            partial_text = partial_text.unsqueeze(1)
-
-        # shape: (batch_size * beam_size, partial_caption_length, vocab_size)
-        logits = self.textual(
-            object_features,
-            partial_text,
-        )
-
-        return logits[:, -1, :].float()
-
-
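Sketch (not from the diff) of the shift-by-one alignment used in the training branch above: the logits at position t are scored against the token at t+1, and padding positions are dropped before the loss. The token ids and vocabulary size below are made up for illustration.

import torch
import torch.nn as nn

padding_idx, vocab = 0, 30522
text_tokens = torch.tensor([[101, 7, 8, 9, 102, 0, 0]])  # hypothetical ids, 0 = padding
output_logits = torch.randn(1, 7, vocab)                  # (batch, length, vocab)

feat = output_logits[:, :-1].reshape(-1, vocab)           # predictions for positions 0..5
target = text_tokens[:, 1:].reshape(-1)                   # ground truth shifted left by one
valid = target != padding_idx
loss = nn.CrossEntropyLoss()(feat[valid], target[valid])
print(float(loss))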
-class SmoothLabelCrossEntropyLoss(nn.Module):
-    def __init__(self, eps=0.1, log_prefix='', ignore_index=None):
-        super().__init__()
-        self.eps = eps
-        self.log_soft = nn.LogSoftmax(dim=1)
-        self.kl = nn.KLDivLoss(reduction='none')
-
-        self.iter = 0
-        self.max_loss = 0
-        self.min_loss = 0
-        self.log_prefix = log_prefix
-        self.ignore_index = ignore_index
-
-    def forward(self, feature, target):
-        feature = feature.float()
-        if self.ignore_index is not None:
-            valid_mask = target != self.ignore_index
-            target = target[valid_mask]
-            feature = feature[valid_mask]
-        assert target.numel() > 0
-        self.iter += 1
-        eps = self.eps
-        n_class = feature.size(1)
-        one_hot = torch.zeros_like(feature).scatter(1, target.view(-1, 1), 1)
-        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
-        log_prb = self.log_soft(feature)
-        loss = self.kl(log_prb, one_hot)
-        return loss.sum(dim=1).mean()
-
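Sketch (not from the diff): the smoothed target distribution the loss above builds for eps=0.1 and 5 classes -- the true class keeps 1-eps of the probability mass and the rest is spread evenly over the other classes.

import torch

eps, n_class = 0.1, 5
target = torch.tensor([2])
one_hot = torch.zeros(1, n_class).scatter(1, target.view(-1, 1), 1)
smoothed = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
print(smoothed)  # tensor([[0.0250, 0.0250, 0.9000, 0.0250, 0.0250]])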
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/proposal_generator/proposal_utils.py
DELETED
@@ -1,196 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import logging
-import math
-from typing import List, Tuple, Union
-import torch
-
-from detectron2.layers import batched_nms, cat
-from detectron2.structures import Boxes, Instances
-
-logger = logging.getLogger(__name__)
-
-
-def _is_tracing():
-    # (fixed in TORCH_VERSION >= 1.9)
-    if torch.jit.is_scripting():
-        # https://github.com/pytorch/pytorch/issues/47379
-        return False
-    else:
-        return torch.jit.is_tracing()
-
-
-def find_top_rpn_proposals(
-    proposals: List[torch.Tensor],
-    pred_objectness_logits: List[torch.Tensor],
-    image_sizes: List[Tuple[int, int]],
-    nms_thresh: float,
-    pre_nms_topk: int,
-    post_nms_topk: int,
-    min_box_size: float,
-    training: bool,
-):
-    """
-    For each feature map, select the `pre_nms_topk` highest scoring proposals,
-    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
-    highest scoring proposals among all the feature maps for each image.
-
-    Args:
-        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
-            All proposal predictions on the feature maps.
-        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
-        image_sizes (list[tuple]): sizes (h, w) for each image
-        nms_thresh (float): IoU threshold to use for NMS
-        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
-            When RPN is run on multiple feature maps (as in FPN) this number is per
-            feature map.
-        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
-            When RPN is run on multiple feature maps (as in FPN) this number is total,
-            over all feature maps.
-        min_box_size (float): minimum proposal box side length in pixels (absolute units
-            wrt input images).
-        training (bool): True if proposals are to be used in training, otherwise False.
-            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
-            comment.
-
-    Returns:
-        list[Instances]: list of N Instances. The i-th Instances
-            stores post_nms_topk object proposals for image i, sorted by their
-            objectness score in descending order.
-    """
-    num_images = len(image_sizes)
-    device = proposals[0].device
-
-    # 1. Select top-k anchor for every level and every image
-    topk_scores = []  # #lvl Tensor, each of shape N x topk
-    topk_proposals = []
-    level_ids = []  # #lvl Tensor, each of shape (topk,)
-    batch_idx = torch.arange(num_images, device=device)
-    for level_id, (proposals_i, logits_i) in enumerate(zip(proposals, pred_objectness_logits)):
-        Hi_Wi_A = logits_i.shape[1]
-        if isinstance(Hi_Wi_A, torch.Tensor):  # it's a tensor in tracing
-            num_proposals_i = torch.clamp(Hi_Wi_A, max=pre_nms_topk)
-        else:
-            num_proposals_i = min(Hi_Wi_A, pre_nms_topk)
-
-        topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
-
-        # each is N x topk
-        topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 4
-
-        topk_proposals.append(topk_proposals_i)
-        topk_scores.append(topk_scores_i)
-        level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
-
-    # 2. Concat all levels together
-    topk_scores = cat(topk_scores, dim=1)
-    topk_proposals = cat(topk_proposals, dim=1)
-    level_ids = cat(level_ids, dim=0)
-
-    # 3. For each image, run a per-level NMS, and choose topk results.
-    results: List[Instances] = []
-    for n, image_size in enumerate(image_sizes):
-        boxes = Boxes(topk_proposals[n])
-        scores_per_img = topk_scores[n]
-        lvl = level_ids
-
-        valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
-        if not valid_mask.all():
-            if training:
-                raise FloatingPointError(
-                    "Predicted boxes or scores contain Inf/NaN. Training has diverged."
-                )
-            boxes = boxes[valid_mask]
-            scores_per_img = scores_per_img[valid_mask]
-            lvl = lvl[valid_mask]
-        boxes.clip(image_size)
-
-        # filter empty boxes
-        keep = boxes.nonempty(threshold=min_box_size)
-        if _is_tracing() or keep.sum().item() != len(boxes):
-            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
-
-        keep = batched_nms(boxes.tensor, scores_per_img, lvl, nms_thresh)
-        # In Detectron1, there was different behavior during training vs. testing.
-        # (https://github.com/facebookresearch/Detectron/issues/459)
-        # During training, topk is over the proposals from *all* images in the training batch.
-        # During testing, it is over the proposals for each image separately.
-        # As a result, the training behavior becomes batch-dependent,
-        # and the configuration "POST_NMS_TOPK_TRAIN" ends up relying on the batch size.
-        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
-        keep = keep[:post_nms_topk]  # keep is already sorted
-
-        res = Instances(image_size)
-        res.proposal_boxes = boxes[keep]
-        res.objectness_logits = scores_per_img[keep]
-        results.append(res)
-    return results
-
-
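Illustration only (not from the diff): the per-level "top-k, then NMS" pattern applied above, run on random stand-in boxes and scores. `batched_nms` here is torchvision's implementation, which `detectron2.layers.batched_nms` wraps.

import torch
from torchvision.ops import batched_nms

scores = torch.rand(1000)                    # objectness for one image at one level
boxes = torch.rand(1000, 4) * 100
boxes[:, 2:] += boxes[:, :2]                 # make (x1, y1, x2, y2) well-formed
pre_nms_topk = 200

topk_scores, topk_idx = scores.topk(pre_nms_topk)
topk_boxes = boxes[topk_idx]
lvl = torch.zeros(pre_nms_topk, dtype=torch.int64)   # a single level in this toy case
keep = batched_nms(topk_boxes, topk_scores, lvl, iou_threshold=0.7)
print(keep.shape)  # indices of surviving proposals, already sorted by score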
-def add_ground_truth_to_proposals(
-    gt: Union[List[Instances], List[Boxes]], proposals: List[Instances]
-) -> List[Instances]:
-    """
-    Call `add_ground_truth_to_proposals_single_image` for all images.
-
-    Args:
-        gt (Union[List[Instances], List[Boxes]]): list of N elements. Element i is an Instances
-            representing the ground-truth for image i.
-        proposals (list[Instances]): list of N elements. Element i is an Instances
-            representing the proposals for image i.
-
-    Returns:
-        list[Instances]: list of N Instances. Each is the proposals for the image,
-            with field "proposal_boxes" and "objectness_logits".
-    """
-    assert gt is not None
-
-    if len(proposals) != len(gt):
-        raise ValueError("proposals and gt should have the same length as the number of images!")
-    if len(proposals) == 0:
-        return proposals
-
-    return [
-        add_ground_truth_to_proposals_single_image(gt_i, proposals_i)
-        for gt_i, proposals_i in zip(gt, proposals)
-    ]
-
-
-def add_ground_truth_to_proposals_single_image(
-    gt: Union[Instances, Boxes], proposals: Instances
-) -> Instances:
-    """
-    Augment `proposals` with `gt`.
-
-    Args:
-        Same as `add_ground_truth_to_proposals`, but with gt and proposals
-        per image.
-
-    Returns:
-        Same as `add_ground_truth_to_proposals`, but for only one image.
-    """
-    if isinstance(gt, Boxes):
-        # convert Boxes to Instances
-        gt = Instances(proposals.image_size, gt_boxes=gt)
-
-    gt_boxes = gt.gt_boxes
-    device = proposals.objectness_logits.device
-    # Assign all ground-truth boxes an objectness logit corresponding to
-    # P(object) = sigmoid(logit) =~ 1.
-    gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
-    gt_logits = gt_logit_value * torch.ones(len(gt_boxes), device=device)
-
-    # Concatenating gt_boxes with proposals requires them to have the same fields
-    gt_proposal = Instances(proposals.image_size, **gt.get_fields())
-    gt_proposal.proposal_boxes = gt_boxes
-    gt_proposal.objectness_logits = gt_logits
-
-    for key in proposals.get_fields().keys():
-        assert gt_proposal.has(
-            key
-        ), "The attribute '{}' in `proposals` does not exist in `gt`".format(key)
-
-    # NOTE: Instances.cat only use fields from the first item. Extra fields in latter items
-    # will be thrown away.
-    new_proposals = Instances.cat([proposals, gt_proposal])
-
-    return new_proposals
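Quick numeric check (not from the diff) of the ground-truth logit used above: the sigmoid of that logit is numerically ~1, so GT boxes always outrank predicted proposals.

import math

gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
print(gt_logit_value)                       # ~23.03
print(1 / (1 + math.exp(-gt_logit_value)))  # ~1.0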
spaces/AxelBell/EasyOCR_text_recognition/assets/style.css
DELETED
@@ -1,92 +0,0 @@
-:root {
-  --primary-100: #fce7f3;
-  --primary-100: #ffecb3;
-  --primary-200: #ffe082;
-  --primary-300: #ffd54f;
-  --primary-400: #ffca28;
-  --primary-500: #ffc107;
-  --primary-600: #ffb300;
-  --primary-700: #ffa000;
-  --primary-800: #ff8f00;
-  --primary-900: #ff6f00;
-  --primary-950: #f57c00;
-  --slider-color: #fc9925;
-  --checkbox-background-color-selected: #fc9925;
-  --button-primary-background-fill: #fc9925;
-  --button-primary-text-color: var(--primary-100);
-  --background-fill-secondary: var(--neutral-900);
-  --block-background-fill: #31395294;
-  --block-border-color: var(--border-color-primary);
-  --block-info-text-color: #f8f8f2;
-  --block-label-background-fill: var(--background-fill-secondary);
-  --block-label-border-color: var(--border-color-primary);
-  --block-label-text-color: #f8f8f2;
-  --block-title-text-color: #f8f8f2;
-  --body-background-fill: var(--background-fill-primary);
-  --body-text-color: #f8f8f2;
-  --body-text-color-subdued: var(--neutral-400);
-  --border-color-accent: var(--neutral-600);
-  --border-color-primary: var(--neutral-700);
-  --button-border-width: var(--input-border-width);
-  --button-cancel-background-fill: var(--button-secondary-background-fill);
-  --button-cancel-background-fill-hover: var(--button-cancel-background-fill);
-  --button-cancel-border-color: var(--button-secondary-border-color);
-  --button-cancel-border-color-hover: var(--button-cancel-border-color);
-}
-.dark {
-  --primary-100: #fce7f3;
-  --primary-100: #ffecb3;
-  --primary-200: #ffe082;
-  --primary-300: #ffd54f;
-  --primary-400: #ffca28;
-  --primary-500: #ffc107;
-  --primary-600: #ffb300;
-  --primary-700: #ffa000;
-  --primary-800: #ff8f00;
-  --primary-900: #ff6f00;
-  --primary-950: #f57c00;
-  --slider-color: #fc9925;
-  --checkbox-background-color-selected: #fc9925;
-  --button-primary-background-fill: #fc9925;
-  --button-primary-text-color: var(--primary-100);
-}
-
-body {
-  flex-grow: initial !important;
-}
-.show-api, .built-with {
-  color: #FC9925 !important;
-}
-#lang ul {
-  max-height: 300px !important;
-}
-#examples {
-  overflow-y: auto !important;
-}
-#examples th {
-  display: none;
-}
-#examples td:nth-child(n + 3) {
-  display: none;
-}
-#examples td:nth-child(1) {
-  display: none;
-}
-#examples .table-wrap {
-  width: min-content;
-}
-#examples tbody {
-  display: flex;
-}
-.center {
-  text-align: center;
-  max-width: 60%;
-  margin: auto;
-}
-.fs-xx {
-  font-size: xx-large;
-  color: #FC9925 !important;
-}
-.fs-x {
-  font-size: x-large;
-}
spaces/Bart92/RVC_HF/lib/globals/globals.py
DELETED
@@ -1,5 +0,0 @@
-DoFormant: bool = False
-Quefrency: float = 8.0
-Timbre: float = 1.2
-
-NotesOrHertz: bool = False
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/__init__.py
DELETED
@@ -1,13 +0,0 @@
-from typing import List, Optional
-
-__version__ = "23.1.2"
-
-
-def main(args: Optional[List[str]] = None) -> int:
-    """This is an internal API only meant for use by pip's own console scripts.
-
-    For additional details, see https://github.com/pypa/pip/issues/7498.
-    """
-    from pip._internal.utils.entrypoints import _wrapper
-
-    return _wrapper(args)
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/emoji.py
DELETED
@@ -1,96 +0,0 @@
-import sys
-from typing import TYPE_CHECKING, Optional, Union
-
-from .jupyter import JupyterMixin
-from .segment import Segment
-from .style import Style
-from ._emoji_codes import EMOJI
-from ._emoji_replace import _emoji_replace
-
-if sys.version_info >= (3, 8):
-    from typing import Literal
-else:
-    from pip._vendor.typing_extensions import Literal  # pragma: no cover
-
-
-if TYPE_CHECKING:
-    from .console import Console, ConsoleOptions, RenderResult
-
-
-EmojiVariant = Literal["emoji", "text"]
-
-
-class NoEmoji(Exception):
-    """No emoji by that name."""
-
-
-class Emoji(JupyterMixin):
-    __slots__ = ["name", "style", "_char", "variant"]
-
-    VARIANTS = {"text": "\uFE0E", "emoji": "\uFE0F"}
-
-    def __init__(
-        self,
-        name: str,
-        style: Union[str, Style] = "none",
-        variant: Optional[EmojiVariant] = None,
-    ) -> None:
-        """A single emoji character.
-
-        Args:
-            name (str): Name of emoji.
-            style (Union[str, Style], optional): Optional style. Defaults to None.
-
-        Raises:
-            NoEmoji: If the emoji doesn't exist.
-        """
-        self.name = name
-        self.style = style
-        self.variant = variant
-        try:
-            self._char = EMOJI[name]
-        except KeyError:
-            raise NoEmoji(f"No emoji called {name!r}")
-        if variant is not None:
-            self._char += self.VARIANTS.get(variant, "")
-
-    @classmethod
-    def replace(cls, text: str) -> str:
-        """Replace emoji markup with corresponding unicode characters.
-
-        Args:
-            text (str): A string with emoji codes, e.g. "Hello :smiley:!"
-
-        Returns:
-            str: A string with emoji codes replaced with actual emoji.
-        """
-        return _emoji_replace(text)
-
-    def __repr__(self) -> str:
-        return f"<emoji {self.name!r}>"
-
-    def __str__(self) -> str:
-        return self._char
-
-    def __rich_console__(
-        self, console: "Console", options: "ConsoleOptions"
-    ) -> "RenderResult":
-        yield Segment(self._char, console.get_style(self.style))
-
-
-if __name__ == "__main__":  # pragma: no cover
-    import sys
-
-    from pip._vendor.rich.columns import Columns
-    from pip._vendor.rich.console import Console
-
-    console = Console(record=True)
-
-    columns = Columns(
-        (f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200D" not in name),
-        column_first=True,
-    )
-
-    console.print(columns)
-    if len(sys.argv) > 1:
-        console.save_html(sys.argv[1])
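Usage sketch (not part of the diff), exercising the public surface shown above. The import path assumes pip's vendored copy; the standalone `rich` package exposes the same module as `rich.emoji`.

from pip._vendor.rich.emoji import Emoji

print(Emoji.replace("Hello :smiley:!"))  # emoji markup -> unicode character
print(repr(Emoji("smiley")))             # <emoji 'smiley'>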
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/padding.py
DELETED
@@ -1,141 +0,0 @@
-from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union
-
-if TYPE_CHECKING:
-    from .console import (
-        Console,
-        ConsoleOptions,
-        RenderableType,
-        RenderResult,
-    )
-from .jupyter import JupyterMixin
-from .measure import Measurement
-from .style import Style
-from .segment import Segment
-
-
-PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]]
-
-
-class Padding(JupyterMixin):
-    """Draw space around content.
-
-    Example:
-        >>> print(Padding("Hello", (2, 4), style="on blue"))
-
-    Args:
-        renderable (RenderableType): String or other renderable.
-        pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders.
-            May be specified with 1, 2, or 4 integers (CSS style).
-        style (Union[str, Style], optional): Style for padding characters. Defaults to "none".
-        expand (bool, optional): Expand padding to fit available width. Defaults to True.
-    """
-
-    def __init__(
-        self,
-        renderable: "RenderableType",
-        pad: "PaddingDimensions" = (0, 0, 0, 0),
-        *,
-        style: Union[str, Style] = "none",
-        expand: bool = True,
-    ):
-        self.renderable = renderable
-        self.top, self.right, self.bottom, self.left = self.unpack(pad)
-        self.style = style
-        self.expand = expand
-
-    @classmethod
-    def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
-        """Make padding instance to render an indent.
-
-        Args:
-            renderable (RenderableType): String or other renderable.
-            level (int): Number of characters to indent.
-
-        Returns:
-            Padding: A Padding instance.
-        """
-
-        return Padding(renderable, pad=(0, 0, 0, level), expand=False)
-
-    @staticmethod
-    def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
-        """Unpack padding specified in CSS style."""
-        if isinstance(pad, int):
-            return (pad, pad, pad, pad)
-        if len(pad) == 1:
-            _pad = pad[0]
-            return (_pad, _pad, _pad, _pad)
-        if len(pad) == 2:
-            pad_top, pad_right = cast(Tuple[int, int], pad)
-            return (pad_top, pad_right, pad_top, pad_right)
-        if len(pad) == 4:
-            top, right, bottom, left = cast(Tuple[int, int, int, int], pad)
-            return (top, right, bottom, left)
-        raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given")
-
-    def __repr__(self) -> str:
-        return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))"
-
-    def __rich_console__(
-        self, console: "Console", options: "ConsoleOptions"
-    ) -> "RenderResult":
-        style = console.get_style(self.style)
-        if self.expand:
-            width = options.max_width
-        else:
-            width = min(
-                Measurement.get(console, options, self.renderable).maximum
-                + self.left
-                + self.right,
-                options.max_width,
-            )
-        render_options = options.update_width(width - self.left - self.right)
-        if render_options.height is not None:
-            render_options = render_options.update_height(
-                height=render_options.height - self.top - self.bottom
-            )
-        lines = console.render_lines(
-            self.renderable, render_options, style=style, pad=True
-        )
-        _Segment = Segment
-
-        left = _Segment(" " * self.left, style) if self.left else None
-        right = (
-            [_Segment(f'{" " * self.right}', style), _Segment.line()]
-            if self.right
-            else [_Segment.line()]
-        )
-        blank_line: Optional[List[Segment]] = None
-        if self.top:
-            blank_line = [_Segment(f'{" " * width}\n', style)]
-            yield from blank_line * self.top
-        if left:
-            for line in lines:
-                yield left
-                yield from line
-                yield from right
-        else:
-            for line in lines:
-                yield from line
-                yield from right
-        if self.bottom:
-            blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
-            yield from blank_line * self.bottom
-
-    def __rich_measure__(
-        self, console: "Console", options: "ConsoleOptions"
-    ) -> "Measurement":
-        max_width = options.max_width
-        extra_width = self.left + self.right
-        if max_width - extra_width < 1:
-            return Measurement(max_width, max_width)
-        measure_min, measure_max = Measurement.get(console, options, self.renderable)
-        measurement = Measurement(measure_min + extra_width, measure_max + extra_width)
-        measurement = measurement.with_maximum(max_width)
-        return measurement
-
-
-if __name__ == "__main__":  # pragma: no cover
-    from pip._vendor.rich import print
-
-    print(Padding("Hello, World", (2, 4), style="on blue"))
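Usage sketch (not part of the diff): the CSS-style padding specs handled by `Padding.unpack`, plus rendering a padded renderable, mirroring the module's own __main__ block. Imports assume pip's vendored copy of rich.

from pip._vendor.rich.console import Console
from pip._vendor.rich.padding import Padding

print(Padding.unpack(1))       # (1, 1, 1, 1)
print(Padding.unpack((2, 4)))  # (2, 4, 2, 4) -- (top, right, bottom, left)

Console().print(Padding("Hello, World", (2, 4), style="on blue"))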
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/ntlmpool.py
DELETED
@@ -1,130 +0,0 @@
-"""
-NTLM authenticating pool, contributed by erikcederstran
-
-Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
-"""
-from __future__ import absolute_import
-
-import warnings
-from logging import getLogger
-
-from ntlm import ntlm
-
-from .. import HTTPSConnectionPool
-from ..packages.six.moves.http_client import HTTPSConnection
-
-warnings.warn(
-    "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
-    "in urllib3 v2.0 release, urllib3 is not able to support it properly due "
-    "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
-    "If you are a user of this module please comment in the mentioned issue.",
-    DeprecationWarning,
-)
-
-log = getLogger(__name__)
-
-
-class NTLMConnectionPool(HTTPSConnectionPool):
-    """
-    Implements an NTLM authentication version of an urllib3 connection pool
-    """
-
-    scheme = "https"
-
-    def __init__(self, user, pw, authurl, *args, **kwargs):
-        """
-        authurl is a random URL on the server that is protected by NTLM.
-        user is the Windows user, probably in the DOMAIN\\username format.
-        pw is the password for the user.
-        """
-        super(NTLMConnectionPool, self).__init__(*args, **kwargs)
-        self.authurl = authurl
-        self.rawuser = user
-        user_parts = user.split("\\", 1)
-        self.domain = user_parts[0].upper()
-        self.user = user_parts[1]
-        self.pw = pw
-
-    def _new_conn(self):
-        # Performs the NTLM handshake that secures the connection. The socket
-        # must be kept open while requests are performed.
-        self.num_connections += 1
-        log.debug(
-            "Starting NTLM HTTPS connection no. %d: https://%s%s",
-            self.num_connections,
-            self.host,
-            self.authurl,
-        )
-
-        headers = {"Connection": "Keep-Alive"}
-        req_header = "Authorization"
-        resp_header = "www-authenticate"
-
-        conn = HTTPSConnection(host=self.host, port=self.port)
-
-        # Send negotiation message
-        headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
-            self.rawuser
-        )
-        log.debug("Request headers: %s", headers)
-        conn.request("GET", self.authurl, None, headers)
-        res = conn.getresponse()
-        reshdr = dict(res.headers)
-        log.debug("Response status: %s %s", res.status, res.reason)
-        log.debug("Response headers: %s", reshdr)
-        log.debug("Response data: %s [...]", res.read(100))
-
-        # Remove the reference to the socket, so that it can not be closed by
-        # the response object (we want to keep the socket open)
-        res.fp = None
-
-        # Server should respond with a challenge message
-        auth_header_values = reshdr[resp_header].split(", ")
-        auth_header_value = None
-        for s in auth_header_values:
-            if s[:5] == "NTLM ":
-                auth_header_value = s[5:]
-        if auth_header_value is None:
-            raise Exception(
-                "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
-            )
-
-        # Send authentication message
-        ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
-            auth_header_value
-        )
-        auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
-            ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
-        )
-        headers[req_header] = "NTLM %s" % auth_msg
-        log.debug("Request headers: %s", headers)
-        conn.request("GET", self.authurl, None, headers)
-        res = conn.getresponse()
-        log.debug("Response status: %s %s", res.status, res.reason)
-        log.debug("Response headers: %s", dict(res.headers))
-        log.debug("Response data: %s [...]", res.read()[:100])
-        if res.status != 200:
-            if res.status == 401:
-                raise Exception("Server rejected request: wrong username or password")
-            raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
-
-        res.fp = None
-        log.debug("Connection established")
-        return conn
-
-    def urlopen(
-        self,
-        method,
-        url,
-        body=None,
-        headers=None,
-        retries=3,
-        redirect=True,
-        assert_same_host=True,
-    ):
-        if headers is None:
-            headers = {}
-        headers["Connection"] = "Keep-Alive"
-        return super(NTLMConnectionPool, self).urlopen(
-            method, url, body, headers, retries, redirect, assert_same_host
-        )
spaces/CGMatter/modelscope-text-to-video-synthesis/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: ModelScope Text To Video Synthesis
-emoji: 🚀
-colorFrom: pink
-colorTo: pink
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-duplicated_from: damo-vilab/modelscope-text-to-video-synthesis
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/TensorMask/train_net.py
DELETED
@@ -1,70 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-
-"""
-TensorMask Training Script.
-
-This script is a simplified version of the training script in detectron2/tools.
-"""
-
-import os
-
-import detectron2.utils.comm as comm
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import get_cfg
-from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
-from detectron2.evaluation import COCOEvaluator, verify_results
-
-from tensormask import add_tensormask_config
-
-
-class Trainer(DefaultTrainer):
-    @classmethod
-    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
-        if output_folder is None:
-            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
-        return COCOEvaluator(dataset_name, cfg, True, output_folder)
-
-
-def setup(args):
-    """
-    Create configs and perform basic setups.
-    """
-    cfg = get_cfg()
-    add_tensormask_config(cfg)
-    cfg.merge_from_file(args.config_file)
-    cfg.merge_from_list(args.opts)
-    cfg.freeze()
-    default_setup(cfg, args)
-    return cfg
-
-
-def main(args):
-    cfg = setup(args)
-
-    if args.eval_only:
-        model = Trainer.build_model(cfg)
-        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
-            cfg.MODEL.WEIGHTS, resume=args.resume
-        )
-        res = Trainer.test(cfg, model)
-        if comm.is_main_process():
-            verify_results(cfg, res)
-        return res
-
-    trainer = Trainer(cfg)
-    trainer.resume_or_load(resume=args.resume)
-    return trainer.train()
-
-
-if __name__ == "__main__":
-    args = default_argument_parser().parse_args()
-    print("Command Line Args:", args)
-    launch(
-        main,
-        args.num_gpus,
-        num_machines=args.num_machines,
-        machine_rank=args.machine_rank,
-        dist_url=args.dist_url,
-        args=(args,),
-    )
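Not from the diff: typical invocations of a detectron2 training script like the one above, via the flags provided by `default_argument_parser`. The config path is an assumption for illustration.

# Train on 8 GPUs:
#   python train_net.py --config-file configs/tensormask_R_50_FPN_1x.yaml --num-gpus 8
# Evaluate a checkpoint:
#   python train_net.py --config-file configs/tensormask_R_50_FPN_1x.yaml \
#       --eval-only MODEL.WEIGHTS /path/to/model_final.pth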
spaces/Candeloro/anime-remove-background/app.py
DELETED
@@ -1,52 +0,0 @@
-import gradio as gr
-import huggingface_hub
-import onnxruntime as rt
-import numpy as np
-import cv2
-
-
-def get_mask(img, s=1024):
-    img = (img / 255).astype(np.float32)
-    h, w = h0, w0 = img.shape[:-1]
-    h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
-    ph, pw = s - h, s - w
-    img_input = np.zeros([s, s, 3], dtype=np.float32)
-    img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
-    img_input = np.transpose(img_input, (2, 0, 1))
-    img_input = img_input[np.newaxis, :]
-    mask = rmbg_model.run(None, {'img': img_input})[0][0]
-    mask = np.transpose(mask, (1, 2, 0))
-    mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
-    mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
-    return mask
-
-
-def rmbg_fn(img):
-    mask = get_mask(img)
-    img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
-    mask = (mask * 255).astype(np.uint8)
-    img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
-    mask = mask.repeat(3, axis=2)
-    return mask, img
-
-
-if __name__ == "__main__":
-    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
-    model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
-    rmbg_model = rt.InferenceSession(model_path, providers=providers)
-    app = gr.Blocks()
-    with app:
-        gr.Markdown("# Anime Remove Background\n\n"
-                    "\n\n"
-                    "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
-        with gr.Row():
-            with gr.Column():
-                input_img = gr.Image(label="input image")
-                examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
-                examples = gr.Dataset(components=[input_img], samples=examples_data)
-                run_btn = gr.Button(variant="primary")
-            output_mask = gr.Image(label="mask")
-            output_img = gr.Image(label="result", image_mode="RGBA")
-        examples.click(lambda x: x[0], [examples], [input_img])
-        run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
-    app.launch()
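Sketch (not from the diff) of the letterboxing arithmetic in `get_mask` above: the long side is scaled to `s` and the short side is centered with zero padding. The input size is an example value.

h0, w0, s = 720, 1280, 1024
h, w = (s, int(s * w0 / h0)) if h0 > w0 else (int(s * h0 / w0), s)
ph, pw = s - h, s - w
print(h, w, ph, pw)  # 576 1024 448 0 -> the resized image sits in rows ph//2 .. ph//2 + h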
spaces/ChandraMohanNayal/AutoGPT/tests/__init__.py
DELETED
File without changes
|
spaces/CofAI/chat/g4f/Provider/Providers/Ails.py
DELETED
@@ -1,87 +0,0 @@
-import os
-import time
-import json
-import uuid
-import hashlib
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-from datetime import datetime
-
-url: str = 'https://ai.ls'
-model: str = 'gpt-3.5-turbo'
-supports_stream = True
-needs_auth = False
-working = True
-
-
-class Utils:
-    def hash(json_data: Dict[str, str]) -> sha256:
-
-        base_string: str = '%s:%s:%s:%s' % (
-            json_data['t'],
-            json_data['m'],
-            'WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf',
-            len(json_data['m'])
-        )
-
-        return hashlib.sha256(base_string.encode()).hexdigest()
-
-    def format_timestamp(timestamp: int) -> str:
-
-        e = timestamp
-        n = e % 10
-        r = n + 1 if n % 2 == 0 else n
-        return str(e - n + r)
-
-
-def _create_completion(model: str, messages: list, temperature: float = 0.6, stream: bool = False, **kwargs):
-
-    headers = {
-        'authority': 'api.caipacity.com',
-        'accept': '*/*',
-        'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-        'authorization': 'Bearer free',
-        'client-id': str(uuid.uuid4()),
-        'client-v': '0.1.249',
-        'content-type': 'application/json',
-        'origin': 'https://ai.ls',
-        'referer': 'https://ai.ls/',
-        'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-        'sec-ch-ua-mobile': '?0',
-        'sec-ch-ua-platform': '"Windows"',
-        'sec-fetch-dest': 'empty',
-        'sec-fetch-mode': 'cors',
-        'sec-fetch-site': 'cross-site',
-        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-    }
-
-    timestamp = Utils.format_timestamp(int(time.time() * 1000))
-
-    sig = {
-        'd': datetime.now().strftime('%Y-%m-%d'),
-        't': timestamp,
-        's': Utils.hash({
-            't': timestamp,
-            'm': messages[-1]['content']})}
-
-    json_data = json.dumps(separators=(',', ':'), obj={
-        'model': 'gpt-3.5-turbo',
-        'temperature': 0.6,
-        'stream': True,
-        'messages': messages} | sig)
-
-    response = requests.post('https://api.caipacity.com/v1/chat/completions',
-                             headers=headers, data=json_data, stream=True)
-
-    for token in response.iter_lines():
-        if b'content' in token:
-            completion_chunk = json.loads(token.decode().replace('data: ', ''))
-            token = completion_chunk['choices'][0]['delta'].get('content')
-            if token != None:
-                yield token
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join(
-        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
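Illustration only (not from the diff) of the request signature built above: a SHA-256 over the rounded timestamp, the last message, a hard-coded salt, and the message length.

import hashlib
import time

def format_timestamp(timestamp: int) -> str:
    e = timestamp
    n = e % 10
    r = n + 1 if n % 2 == 0 else n
    return str(e - n + r)

t = format_timestamp(int(time.time() * 1000))
m = "hello"
base = "%s:%s:%s:%s" % (t, m, "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf", len(m))
print(hashlib.sha256(base.encode()).hexdigest())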
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/scaleUpem.py
DELETED
@@ -1,395 +0,0 @@
"""Change the units-per-EM of a font.

AAT and Graphite tables are not supported. CFF/CFF2 fonts
are de-subroutinized."""


from fontTools.ttLib.ttVisitor import TTVisitor
import fontTools.ttLib as ttLib
import fontTools.ttLib.tables.otBase as otBase
import fontTools.ttLib.tables.otTables as otTables
from fontTools.cffLib import VarStoreData
import fontTools.cffLib.specializer as cffSpecializer
from fontTools.varLib import builder  # for VarData.calculateNumShorts
from fontTools.misc.fixedTools import otRound
from fontTools.ttLib.tables._g_l_y_f import VarComponentFlags


__all__ = ["scale_upem", "ScalerVisitor"]


class ScalerVisitor(TTVisitor):
    def __init__(self, scaleFactor):
        self.scaleFactor = scaleFactor

    def scale(self, v):
        return otRound(v * self.scaleFactor)


@ScalerVisitor.register_attrs(
    (
        (ttLib.getTableClass("head"), ("unitsPerEm", "xMin", "yMin", "xMax", "yMax")),
        (ttLib.getTableClass("post"), ("underlinePosition", "underlineThickness")),
        (ttLib.getTableClass("VORG"), ("defaultVertOriginY")),
        (
            ttLib.getTableClass("hhea"),
            (
                "ascent",
                "descent",
                "lineGap",
                "advanceWidthMax",
                "minLeftSideBearing",
                "minRightSideBearing",
                "xMaxExtent",
                "caretOffset",
            ),
        ),
        (
            ttLib.getTableClass("vhea"),
            (
                "ascent",
                "descent",
                "lineGap",
                "advanceHeightMax",
                "minTopSideBearing",
                "minBottomSideBearing",
                "yMaxExtent",
                "caretOffset",
            ),
        ),
        (
            ttLib.getTableClass("OS/2"),
            (
                "xAvgCharWidth",
                "ySubscriptXSize",
                "ySubscriptYSize",
                "ySubscriptXOffset",
                "ySubscriptYOffset",
                "ySuperscriptXSize",
                "ySuperscriptYSize",
                "ySuperscriptXOffset",
                "ySuperscriptYOffset",
                "yStrikeoutSize",
                "yStrikeoutPosition",
                "sTypoAscender",
                "sTypoDescender",
                "sTypoLineGap",
                "usWinAscent",
                "usWinDescent",
                "sxHeight",
                "sCapHeight",
            ),
        ),
        (
            otTables.ValueRecord,
            ("XAdvance", "YAdvance", "XPlacement", "YPlacement"),
        ),  # GPOS
        (otTables.Anchor, ("XCoordinate", "YCoordinate")),  # GPOS
        (otTables.CaretValue, ("Coordinate")),  # GDEF
        (otTables.BaseCoord, ("Coordinate")),  # BASE
        (otTables.MathValueRecord, ("Value")),  # MATH
        (otTables.ClipBox, ("xMin", "yMin", "xMax", "yMax")),  # COLR
    )
)
def visit(visitor, obj, attr, value):
    setattr(obj, attr, visitor.scale(value))


@ScalerVisitor.register_attr(
    (ttLib.getTableClass("hmtx"), ttLib.getTableClass("vmtx")), "metrics"
)
def visit(visitor, obj, attr, metrics):
    for g in metrics:
        advance, lsb = metrics[g]
        metrics[g] = visitor.scale(advance), visitor.scale(lsb)


@ScalerVisitor.register_attr(ttLib.getTableClass("VMTX"), "VOriginRecords")
def visit(visitor, obj, attr, VOriginRecords):
    for g in VOriginRecords:
        VOriginRecords[g] = visitor.scale(VOriginRecords[g])


@ScalerVisitor.register_attr(ttLib.getTableClass("glyf"), "glyphs")
def visit(visitor, obj, attr, glyphs):
    for g in glyphs.values():
        for attr in ("xMin", "xMax", "yMin", "yMax"):
            v = getattr(g, attr, None)
            if v is not None:
                setattr(g, attr, visitor.scale(v))

        if g.isComposite():
            for component in g.components:
                component.x = visitor.scale(component.x)
                component.y = visitor.scale(component.y)
            continue

        if g.isVarComposite():
            for component in g.components:
                for attr in ("translateX", "translateY", "tCenterX", "tCenterY"):
                    v = getattr(component.transform, attr)
                    setattr(component.transform, attr, visitor.scale(v))
            continue

        if hasattr(g, "coordinates"):
            coordinates = g.coordinates
            for i, (x, y) in enumerate(coordinates):
                coordinates[i] = visitor.scale(x), visitor.scale(y)


@ScalerVisitor.register_attr(ttLib.getTableClass("gvar"), "variations")
def visit(visitor, obj, attr, variations):

    # VarComposites are a pain to handle :-(
    glyfTable = visitor.font["glyf"]

    for glyphName, varlist in variations.items():
        glyph = glyfTable[glyphName]
        isVarComposite = glyph.isVarComposite()
        for var in varlist:
            coordinates = var.coordinates

            if not isVarComposite:
                for i, xy in enumerate(coordinates):
                    if xy is None:
                        continue
                    coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
                continue

            # VarComposite glyph

            i = 0
            for component in glyph.components:
                if component.flags & VarComponentFlags.AXES_HAVE_VARIATION:
                    i += len(component.location)
                if component.flags & (
                    VarComponentFlags.HAVE_TRANSLATE_X
                    | VarComponentFlags.HAVE_TRANSLATE_Y
                ):
                    xy = coordinates[i]
                    coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
                    i += 1
                if component.flags & VarComponentFlags.HAVE_ROTATION:
                    i += 1
                if component.flags & (
                    VarComponentFlags.HAVE_SCALE_X | VarComponentFlags.HAVE_SCALE_Y
                ):
                    i += 1
                if component.flags & (
                    VarComponentFlags.HAVE_SKEW_X | VarComponentFlags.HAVE_SKEW_Y
                ):
                    i += 1
                if component.flags & (
                    VarComponentFlags.HAVE_TCENTER_X | VarComponentFlags.HAVE_TCENTER_Y
                ):
                    xy = coordinates[i]
                    coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
                    i += 1

            # Phantom points
            assert i + 4 == len(coordinates)
            for i in range(i, len(coordinates)):
                xy = coordinates[i]
                coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])


@ScalerVisitor.register_attr(ttLib.getTableClass("kern"), "kernTables")
def visit(visitor, obj, attr, kernTables):
    for table in kernTables:
        kernTable = table.kernTable
        for k in kernTable.keys():
            kernTable[k] = visitor.scale(kernTable[k])


def _cff_scale(visitor, args):
    for i, arg in enumerate(args):
        if not isinstance(arg, list):
            if not isinstance(arg, bytes):
                args[i] = visitor.scale(arg)
        else:
            num_blends = arg[-1]
            _cff_scale(visitor, arg)
            arg[-1] = num_blends


@ScalerVisitor.register_attr(
    (ttLib.getTableClass("CFF "), ttLib.getTableClass("CFF2")), "cff"
)
def visit(visitor, obj, attr, cff):
    cff.desubroutinize()
    topDict = cff.topDictIndex[0]
    varStore = getattr(topDict, "VarStore", None)
    getNumRegions = varStore.getNumRegions if varStore is not None else None
    privates = set()
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            privates.add(c.private)

            commands = cffSpecializer.programToCommands(
                c.program, getNumRegions=getNumRegions
            )
            for op, args in commands:
                if op == "vsindex":
                    continue
                _cff_scale(visitor, args)
            c.program[:] = cffSpecializer.commandsToProgram(commands)

        # Annoying business of scaling numbers that do not matter whatsoever

        for attr in (
            "UnderlinePosition",
            "UnderlineThickness",
            "FontBBox",
            "StrokeWidth",
        ):
            value = getattr(topDict, attr, None)
            if value is None:
                continue
            if isinstance(value, list):
                _cff_scale(visitor, value)
            else:
                setattr(topDict, attr, visitor.scale(value))

        for i in range(6):
            topDict.FontMatrix[i] /= visitor.scaleFactor

        for private in privates:
            for attr in (
                "BlueValues",
                "OtherBlues",
                "FamilyBlues",
                "FamilyOtherBlues",
                # "BlueScale",
                # "BlueShift",
                # "BlueFuzz",
                "StdHW",
                "StdVW",
                "StemSnapH",
                "StemSnapV",
                "defaultWidthX",
                "nominalWidthX",
            ):
                value = getattr(private, attr, None)
                if value is None:
                    continue
                if isinstance(value, list):
                    _cff_scale(visitor, value)
                else:
                    setattr(private, attr, visitor.scale(value))


# ItemVariationStore


@ScalerVisitor.register(otTables.VarData)
def visit(visitor, varData):
    for item in varData.Item:
        for i, v in enumerate(item):
            item[i] = visitor.scale(v)
    varData.calculateNumShorts()


# COLRv1


def _setup_scale_paint(paint, scale):
    if -2 <= scale <= 2 - (1 >> 14):
        paint.Format = otTables.PaintFormat.PaintScaleUniform
        paint.scale = scale
        return

    transform = otTables.Affine2x3()
    transform.populateDefaults()
    transform.xy = transform.yx = transform.dx = transform.dy = 0
    transform.xx = transform.yy = scale

    paint.Format = otTables.PaintFormat.PaintTransform
    paint.Transform = transform


@ScalerVisitor.register(otTables.BaseGlyphPaintRecord)
def visit(visitor, record):
    oldPaint = record.Paint

    scale = otTables.Paint()
    _setup_scale_paint(scale, visitor.scaleFactor)
    scale.Paint = oldPaint

    record.Paint = scale

    return True


@ScalerVisitor.register(otTables.Paint)
def visit(visitor, paint):
    if paint.Format != otTables.PaintFormat.PaintGlyph:
        return True

    newPaint = otTables.Paint()
    newPaint.Format = paint.Format
    newPaint.Paint = paint.Paint
    newPaint.Glyph = paint.Glyph
    del paint.Paint
    del paint.Glyph

    _setup_scale_paint(paint, 1 / visitor.scaleFactor)
    paint.Paint = newPaint

    visitor.visit(newPaint.Paint)

    return False


def scale_upem(font, new_upem):
    """Change the units-per-EM of font to the new value."""
    upem = font["head"].unitsPerEm
    visitor = ScalerVisitor(new_upem / upem)
    visitor.visit(font)


def main(args=None):
    """Change the units-per-EM of fonts"""

    if args is None:
        import sys

        args = sys.argv[1:]

    from fontTools.ttLib import TTFont
    from fontTools.misc.cliTools import makeOutputFileName
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools ttLib.scaleUpem", description="Change the units-per-EM of fonts"
    )
    parser.add_argument("font", metavar="font", help="Font file.")
    parser.add_argument(
        "new_upem", metavar="new-upem", help="New units-per-EM integer value."
    )
    parser.add_argument(
        "--output-file", metavar="path", default=None, help="Output file."
    )

    options = parser.parse_args(args)

    font = TTFont(options.font)
    new_upem = int(options.new_upem)
    output_file = (
        options.output_file
        if options.output_file is not None
        else makeOutputFileName(options.font, overWrite=True, suffix="-scaled")
    )

    scale_upem(font, new_upem)

    print("Writing %s" % output_file)
    font.save(output_file)


if __name__ == "__main__":
    import sys

    sys.exit(main())
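Besides the command-line entry point in main(), the deleted module exposes scale_upem for programmatic use. A minimal sketch of that use; the font path and target upem value are placeholders, not part of the diff:

    # Rescale a font to 1000 units-per-EM (paths are placeholders).
    from fontTools.ttLib import TTFont
    from fontTools.ttLib.scaleUpem import scale_upem

    font = TTFont("MyFont.ttf")        # load any TrueType/CFF font
    scale_upem(font, 1000)             # rescales head, metrics, outlines, variations, COLR, ...
    font.save("MyFont-1000upem.ttf")   # write the rescaled font
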
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/ShareButton-8cd3d8f6.js
DELETED
@@ -1,2 +0,0 @@
import{S as h,e as m,s as d,J as f,K as c,p as w,M as _,n as u,A as y,k as b,o as v,z as S,v as x,x as A,B}from"./index-1d65707a.js";import{I as M}from"./IconButton-d42f3661.js";import"./Button-f155035a.js";function C(r){let e,n;return{c(){e=f("svg"),n=f("path"),c(n,"d","M23,20a5,5,0,0,0-3.89,1.89L11.8,17.32a4.46,4.46,0,0,0,0-2.64l7.31-4.57A5,5,0,1,0,18,7a4.79,4.79,0,0,0,.2,1.32l-7.31,4.57a5,5,0,1,0,0,6.22l7.31,4.57A4.79,4.79,0,0,0,18,25a5,5,0,1,0,5-5ZM23,4a3,3,0,1,1-3,3A3,3,0,0,1,23,4ZM7,19a3,3,0,1,1,3-3A3,3,0,0,1,7,19Zm16,9a3,3,0,1,1,3-3A3,3,0,0,1,23,28Z"),c(n,"fill","currentColor"),c(e,"id","icon"),c(e,"xmlns","http://www.w3.org/2000/svg"),c(e,"viewBox","0 0 32 32")},m(t,a){w(t,e,a),_(e,n)},p:u,i:u,o:u,d(t){t&&y(e)}}}class k extends h{constructor(e){super(),m(this,e,null,C,d,{})}}class l extends Error{constructor(e){super(e),this.name="ShareError"}}const I=async(r,e)=>{if(window.__gradio_space__==null)throw new l("Must be on Spaces to share.");let n,t,a;if(e==="url"){const o=await fetch(r);n=await o.blob(),t=o.headers.get("content-type")||"",a=o.headers.get("content-disposition")||""}else n=E(r),t=r.split(";")[0].split(":")[1],a="file"+t.split("/")[1];const s=new File([n],a,{type:t}),i=await fetch("https://huggingface.co/uploads",{method:"POST",body:s,headers:{"Content-Type":s.type,"X-Requested-With":"XMLHttpRequest"}});if(!i.ok){if(i.headers.get("content-type")?.includes("application/json")){const o=await i.json();throw new l(`Upload failed: ${o.error}`)}throw new l("Upload failed.")}return await i.text()};function E(r){for(var e=r.split(","),n=e[0].match(/:(.*?);/)[1],t=atob(e[1]),a=t.length,s=new Uint8Array(a);a--;)s[a]=t.charCodeAt(a);return new Blob([s],{type:n})}function R(r){let e,n;return e=new M({props:{Icon:k,label:"Share",pending:r[2]}}),e.$on("click",r[4]),{c(){b(e.$$.fragment)},m(t,a){v(e,t,a),n=!0},p(t,[a]){const s={};a&4&&(s.pending=t[2]),e.$set(s)},i(t){n||(S(e.$$.fragment,t),n=!0)},o(t){x(e.$$.fragment,t),n=!1},d(t){A(e,t)}}}function T(r,e,n){const t=B();let{formatter:a}=e,{value:s}=e,i=!1;const p=async()=>{try{n(2,i=!0);const o=await a(s);t("share",{description:o})}catch(o){console.error(o);let g=o instanceof l?o.message:"Share failed.";t("error",g)}finally{n(2,i=!1)}};return r.$$set=o=>{"formatter"in o&&n(0,a=o.formatter),"value"in o&&n(1,s=o.value)},[a,s,i,t,p]}class L extends h{constructor(e){super(),m(this,e,T,R,d,{formatter:0,value:1})}}export{L as S,I as u};
//# sourceMappingURL=ShareButton-8cd3d8f6.js.map
spaces/DaleChen/AutoGPT/autogpt/spinner.py
DELETED
@@ -1,65 +0,0 @@
"""A simple spinner module"""
import itertools
import sys
import threading
import time


class Spinner:
    """A simple spinner class"""

    def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None:
        """Initialize the spinner class

        Args:
            message (str): The message to display.
            delay (float): The delay between each spinner update.
        """
        self.spinner = itertools.cycle(["-", "/", "|", "\\"])
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None

    def spin(self) -> None:
        """Spin the spinner"""
        while self.running:
            sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")

    def __enter__(self):
        """Start the spinner"""
        self.running = True
        self.spinner_thread = threading.Thread(target=self.spin)
        self.spinner_thread.start()

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
        """Stop the spinner

        Args:
            exc_type (Exception): The exception type.
            exc_value (Exception): The exception value.
            exc_traceback (Exception): The exception traceback.
        """
        self.running = False
        if self.spinner_thread is not None:
            self.spinner_thread.join()
        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
        sys.stdout.flush()

    def update_message(self, new_message, delay=0.1):
        """Update the spinner message
        Args:
            new_message (str): New message to display
            delay: Delay in seconds before updating the message
        """
        time.sleep(delay)
        sys.stdout.write(
            f"\r{' ' * (len(self.message) + 2)}\r"
        )  # Clear the current message
        sys.stdout.flush()
        self.message = new_message