parquet-converter committed
Commit 8d941ad · 1 parent: dcf373d

Update parquet files (step 63 of 476)

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete set.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/Battlefield 2 Patch 1.41 No-cd Crack [CRACKED] Tutorial.md +0 -6
  2. spaces/1gistliPinn/ChatGPT4/Examples/Explore the Universe with Girl Scout Astronomy Programs Download free software and activities.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/FILE Mini KMS Activator V1.072 EN.exe FIXED Version.rar.18.md +0 -6
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/AI Image Enhancer Mod APK The Best App for Improving Photo Quality.md +0 -96
  5. spaces/1phancelerku/anime-remove-background/2022 Idol Star Athletics Championships - Chuseok Special Where to Download and Watch Online.md +0 -121
  6. spaces/1phancelerku/anime-remove-background/Enjoy Modern Combat 4 Zero Hour Mod APK with Full Features and No Ads.md +0 -129
  7. spaces/1toTree/lora_test/ppdiffusers/experimental/README.md +0 -6
  8. spaces/2023Liu2023/bingo/src/components/tailwind-indicator.tsx +0 -14
  9. spaces/4Taps/SadTalker/src/face3d/options/__init__.py +0 -1
  10. spaces/AI-Hobbyist/Hoyo-RVC/Dockerfile +0 -13
  11. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/multi_window_disc.py +0 -136
  12. spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/utils/data_generator.py +0 -421
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/audio/Factory.js +0 -13
  14. spaces/Ahmedmewloud/Depplearnig/README.md +0 -12
  15. spaces/AixiaGreyatt/QQsign/README.md +0 -10
  16. spaces/Aki004/herta-so-vits/modules/losses.py +0 -61
  17. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py +0 -196
  18. spaces/Alpaca233/LangchainPDF/app.py +0 -64
  19. spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/regnet.py +0 -325
  20. spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py +0 -2
  21. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py +0 -36
  22. spaces/Andy1621/uniformer_image_segmentation/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py +0 -9
  23. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/cp949prober.py +0 -49
  24. spaces/Bart92/RVC_HF/gui_v1.py +0 -708
  25. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/discriminator/model.py +0 -67
  26. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py +0 -55
  27. spaces/Boadiwaa/Recipes/openai/upload_progress.py +0 -52
  28. spaces/Brasd99/TTS-Voice-Conversion/app.py +0 -72
  29. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/__init__.py +0 -9
  30. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mcan/adapter.py +0 -90
  31. spaces/CarperAI/StableVicuna/app.py +0 -138
  32. spaces/ChandraMohanNayal/AutoGPT/autogpt/__init__.py +0 -0
  33. spaces/ChandraMohanNayal/AutoGPT/autogpt/llm_utils.py +0 -172
  34. spaces/Chilangosta/text-to-pokemon/app.py +0 -204
  35. spaces/Cletrason/Cletrason-toad-in-the-mario-movie/README.md +0 -12
  36. spaces/DHEIVER/CoronaryAngioSegment/detect_anomalies.py +0 -13
  37. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/ttCollection.py +0 -127
  38. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/BlockTitle-dee077e8.js +0 -2
  39. spaces/DataScienceEngineering/6-TreemapAndSunburst/README.md +0 -12
  40. spaces/Datatrooper/boston_housing/app.py +0 -85
  41. spaces/DiamondYin/AnewGame/Build/WaliwebGLgameFPS.loader.js +0 -1
  42. spaces/Duckichan1/Jen_/Dockerfile +0 -1
  43. spaces/Dukcar/Pix2Pix-Video/style.css +0 -101
  44. spaces/EPFL-VILAB/MultiMAE/utils/layers/drop.py +0 -176
  45. spaces/Fengbinbin/gpt-academic/crazy_functions/解析JupyterNotebook.py +0 -145
  46. spaces/Frantz103/CaptionQuest/app.py +0 -230
  47. spaces/GT4SD/protein_properties/app.py +0 -83
  48. spaces/GaenKoki/voicevox/test/test_preset.py +0 -303
  49. spaces/Gen-Sim/Gen-Sim/cliport/models/clip_ling.py +0 -97
  50. spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py +0 -39
spaces/1gistliPinn/ChatGPT4/Examples/Battlefield 2 Patch 1.41 No-cd Crack [CRACKED] Tutorial.md DELETED
@@ -1,6 +0,0 @@
- <h2>battlefield 2 patch 1.41 no-cd crack tutorial</h2><br /><p><b><b>Download Zip</b> &#10004; <a href="https://imgfil.com/2uxXyD">https://imgfil.com/2uxXyD</a></b></p><br /><br />
-
- battlefield 1942 managment tutorial montana battlefield ... battlefield 2 wallpapers no cd crack for battlefield 2 ... 1.41 battlefield 2 patch battlefield vietnam 1.21 1fdad05405<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Explore the Universe with Girl Scout Astronomy Programs Download free software and activities.md DELETED
@@ -1,6 +0,0 @@
- <h2>Girl Scout Astronomy Programs download free software</h2><br /><p><b><b>DOWNLOAD</b> &#11088; <a href="https://imgfil.com/2uy23O">https://imgfil.com/2uy23O</a></b></p><br /><br />
- <br />
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/FILE Mini KMS Activator V1.072 EN.exe FIXED Version.rar.18.md DELETED
@@ -1,6 +0,0 @@
- <h2>FILE mini KMS Activator v1.072 EN.exe FIXED version.rar.18</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://imgfil.com/2uxYMc">https://imgfil.com/2uxYMc</a></b></p><br /><br />
-
- d5da3c52bf<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/AI Image Enhancer Mod APK The Best App for Improving Photo Quality.md DELETED
@@ -1,96 +0,0 @@
-
- <h1>AI Image Enhancer Mod APK: How to Enhance Your Photos and Videos with Artificial Intelligence</h1>
- <p>Do you want to improve the quality of your photos and videos with artificial intelligence? Do you want to save time and effort in editing your photos and videos? Do you want to unlock all the pro features of a powerful photo enhancer app for free? If you answered yes to any of these questions, then you should try AI Image Enhancer Mod APK.</p>
- <h2>ai image enhancer mod apk</h2><br /><p><b><b>DOWNLOAD</b> --->>> <a href="https://urlin.us/2uSVJT">https://urlin.us/2uSVJT</a></b></p><br /><br />
- <h2>What is AI Image Enhancer Mod APK?</h2>
- <p>AI Image Enhancer Mod APK is a modified version of <a href="(^1^)">EnhanceFox</a>, an AI Photo Enhancer that helps to enhance pixelated, blurred, damaged photos and videos to better quality. With this useful AI-powered photo enhancer from risingcabbage, mobile users don’t have to be natural at photo editing and still be able to enhance the quality of any pics.</p>
- <h3>Features of AI Image Enhancer Mod APK</h3>
- <p>AI Image Enhancer Mod APK has many features that make it a great choice for enhancing your photos and videos. Here are some of them:</p>
- <h4>- Enhance pixelated, blurred, and damaged photos and videos</h4>
- <p>With AI Image Enhancer Mod APK, you can easily enhance any photo or video that is pixelated, blurred, or damaged. The app uses advanced artificial intelligence algorithms to analyze your photo or video and restore its details, colors, and clarity. You can also choose from different enhancement modes such as auto, face, scenery, food, text, etc. depending on your needs.</p>
- <h4>- Adjust brightness, contrast, saturation, and sharpness</h4>
- <p>AI Image Enhancer Mod APK also allows you to adjust the brightness, contrast, saturation, and sharpness of your photo or video. You can use the sliders to fine-tune these parameters or use the auto option to let the app do it for you. You can also compare the before and after effects by tapping on the screen.</p>
- <h4>- Apply filters, stickers, frames, and text</h4>
- <p>If you want to add some fun and creativity to your photo or video, you can use AI Image Enhancer Mod APK to apply various filters, stickers, frames, and text. You can choose from a wide range of filters such as vintage, retro, black & white, etc. You can also add stickers such as emojis, animals, flowers, etc. You can also add frames such as polaroid, filmstrip, etc. You can also add text with different fonts, colors, sizes, etc.</p>
- <p>ai photo enhancer pro mod apk<br />
- enhancefox mod apk download<br />
- ai image quality enhancer apk<br />
- ai photo editor mod apk<br />
- enhancefox pro unlocked apk<br />
- ai photo restoration mod apk<br />
- enhancefox premium mod apk<br />
- ai image upscaler apk<br />
- ai photo repair mod apk<br />
- enhancefox cracked apk<br />
- ai image sharpening apk<br />
- ai photo filter mod apk<br />
- enhancefox full version apk<br />
- ai image noise reduction apk<br />
- ai photo collage mod apk<br />
- enhancefox latest mod apk<br />
- ai image colorization apk<br />
- ai photo animator mod apk<br />
- enhancefox paid mod apk<br />
- ai image interpolation apk<br />
- ai photo resizer mod apk<br />
- enhancefox hack mod apk<br />
- ai image super resolution apk<br />
- ai photo compressor mod apk<br />
- enhancefox free mod apk<br />
- ai image deblurring apk<br />
- ai photo beautifier mod apk<br />
- enhancefox vip mod apk<br />
- ai image inpainting apk<br />
- ai photo remover mod apk<br />
- enhancefox unlimited mod apk<br />
- ai image segmentation apk<br />
- ai photo mixer mod apk<br />
- enhancefox adfree mod apk<br />
- ai image style transfer apk<br />
- ai photo background mod apk<br />
- enhancefox pro modded apk<br />
- ai image watermark remover apk<br />
- ai photo sticker mod apk<br />
- enhancefox pro cracked apk</p>
- <h4>- Crop, rotate, flip, and resize images</h4>
- <p>AI Image Enhancer Mod APK also lets you crop, rotate, flip, and resize your images according to your preferences. You can use the crop tool to select the area you want to keep or use the preset ratios such as 1:1, 4:3, 16:9, etc. You can also use the rotate and flip tools to change the orientation of your image. You can also use the resize tool to change the dimensions of your image.</p>
- <h4>- Save and share your enhanced photos and videos</h4>
- <p>After you are done enhancing your photo or video, you can save it to your device or share it with your friends and family. You can choose the output quality and format of your photo or video. You can also choose the destination folder where you want to save it. You can also share your photo or video directly to social media platforms such as Facebook, Instagram, WhatsApp, etc.</p>
- <h2>Why Use AI Image Enhancer Mod APK?</h2>
- <p>AI Image Enhancer Mod APK is not just another photo enhancer app. It has many benefits that make it worth using. Here are some of them:</p>
- <h3>Benefits of AI Image Enhancer Mod APK</h3>
- <h4>- Improve the quality of your photos and videos with AI technology</h4>
- <p>AI Image Enhancer Mod APK uses artificial intelligence to enhance your photos and videos. It can detect and correct various issues such as noise, blur, distortion, etc. It can also restore and improve the details, colors, and clarity of your photos and videos. It can make your photos and videos look more professional and stunning.</p>
- <h4>- Save time and effort in editing your photos and videos</h4>
- <p>AI Image Enhancer Mod APK is easy to use and fast to process. You don't need to have any skills or experience in photo editing to use it. You just need to select your photo or video and let the app do the rest. You can also use the auto option to let the app choose the best enhancement mode for you. You can save a lot of time and effort in editing your photos and videos with AI Image Enhancer Mod APK.</p>
- <h4>- Unlock all the pro features for free with the mod version</h4>
- <p>AI Image Enhancer Mod APK is a modified version of EnhanceFox that gives you access to all the pro features for free. You don't need to pay anything or subscribe to anything to use them. You can enjoy all the features such as filters, stickers, frames, text, etc. without any limitations or restrictions. You can also remove the watermark, ads, and registration requirements with AI Image Enhancer Mod APK.</p>
- <h2>How to Download and Install AI Image Enhancer Mod APK?</h2>
- <p>If you are interested in downloading and installing AI Image Enhancer Mod APK on your device, you can follow these simple steps:</p>
- <h3>Steps to Download and Install AI Image Enhancer Mod APK</h3>
- <h4>- Step 1: Go to the download link provided in this article</h4>
- <p>The first step is to go to the download link provided in this article. This link will take you to a trusted and secure website where you can download AI Image Enhancer Mod APK for free.</p>
- <h4>- Step 2: Tap on the download button and wait for the file to be downloaded</h4>
- <p>The next step is to tap on the download button on the website and wait for the file to be downloaded on your device. The file size is about 30 MB, so it should not take too long to download.</p>
- <h4>- Step 3: Enable unknown sources in your device settings if you haven't done so already</h4>
- <p>The third step is to enable unknown sources in your device settings if you haven't done so already. This is necessary because AI Image Enhancer Mod APK is not available on the Google Play Store, so you need to allow your device to install apps from unknown sources. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
- <h4>- Step 4: Locate the downloaded file in your file manager and tap on it to install it</h4>
- <p>The fourth step is to locate the downloaded file in your file manager and tap on it to install it. The installation process should not take more than a few seconds.</p>
- <h4>- Step 5: Open the app and enjoy enhancing your photos and videos with AI Image Enhancer Mod APK</h4>
- <p>The final step is to open the app and enjoy enhancing your photos and videos with AI Image Enhancer Mod APK. You can start by selecting your photo or video from your gallery or camera and choosing the enhancement mode you want. You can also use the other features such as filters, stickers, frames, text, etc. to make your photo or video more attractive and appealing.</p>
- <h2>Conclusion</h2>
- <p>AI Image Enhancer Mod APK is a great app for enhancing your photos and videos with artificial intelligence. It can help you improve the quality, clarity, and details of your photos and videos with ease and speed. It can also help you add some fun and creativity to your photos and videos with various filters, stickers, frames, text, etc. It can also help you unlock all the pro features for free with the mod version. You can download and install AI Image Enhancer Mod APK on your device by following the steps provided in this article. Try it out and see the difference for yourself.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about AI Image Enhancer Mod APK:</p>
- <h4>- Is AI Image Enhancer Mod APK safe to use?</h4>
- <p>Yes, AI Image Enhancer Mod APK is safe to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It is also tested and verified by many users who have downloaded and installed it on their devices.</p>
- <h4>- Is AI Image Enhancer Mod APK legal to use?</h4>
- <p>Yes, AI Image Enhancer Mod APK is legal to use. It is a modified version of EnhanceFox that does not violate any copyrights or trademarks of the original app. It is also not affiliated with or endorsed by the original app or its developers.</p>
- <h4>- Does AI Image Enhancer Mod APK require root access?</h4>
- <p>No, AI Image Enhancer Mod APK does not require root access. You can install and use it on any Android device without rooting it.</p>
- <h4>- Does AI Image Enhancer Mod APK work offline?</h4>
- <p>No, AI Image Enhancer Mod APK does not work offline. You need to have an internet connection to use it. This is because the app uses artificial intelligence to enhance your photos and videos, which requires online processing.</p>
- <h4>- How can I update AI Image Enhancer Mod APK?</h4>
- <p>You can update AI Image Enhancer Mod APK by visiting this article regularly and checking for the latest version of the app. You can also follow the same steps as mentioned above to download and install the updated version of the app.</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/2022 Idol Star Athletics Championships - Chuseok Special Where to Download and Watch Online.md DELETED
@@ -1,121 +0,0 @@
- <br />
- <h1>Download Idol Star Athletics Championships 2022: How to Watch Your Favorite Idols Compete in Sports</h1>
- <p>If you are a fan of K-pop, you probably know about the Idol Star Athletics Championships (ISAC), a popular TV program that features idols competing in various sports events. ISAC is a special program that airs during major holidays in Korea, such as Lunar New Year and Chuseok. It is a great opportunity for fans to see their favorite idols show off their athletic skills, teamwork, and charisma.</p>
- <h2>download idol star athletics championships 2022</h2><br /><p><b><b>Download Zip</b> &#128504; <a href="https://jinyurl.com/2uNTB0">https://jinyurl.com/2uNTB0</a></b></p><br /><br />
- <p>But how can you download and watch ISAC 2022, especially if you live outside Korea? What are the main events and categories of ISAC 2022? Who are the idols participating in ISAC 2022? In this article, we will answer these questions and more. Read on to find out everything you need to know about ISAC 2022.</p>
- <h2>What is Idol Star Athletics Championships?</h2>
- <p>Idol Star Athletics Championships (ISAC) is a TV program that was first aired in 2010 by MBC, one of the major broadcasting networks in Korea. It is a special program that invites idols from different agencies and groups to compete in various sports events, such as track and field, archery, futsal, dance sports, and e-sports. The program aims to showcase the idols' talents, personalities, and interactions with each other.</p>
- <h3>A brief history of ISAC</h3>
- <p>ISAC was first aired in 2010 as a Lunar New Year special program. It featured 14 idol groups competing in track and field events. Since then, it has become a regular program that airs during major holidays in Korea, such as Lunar New Year and Chuseok. Over the years, ISAC has expanded its scope and scale, adding more events and categories, inviting more idols, and attracting more viewers. Some of the most memorable moments of ISAC include BTS's Jungkook breaking the record for the 400-meter dash, TWICE's Tzuyu hitting a perfect score in archery, EXO's Kai winning the dance sports event, and NCT's Jaehyun scoring a goal in futsal.</p>
- <h3>The main events and categories of ISAC</h3>
- <p>ISAC 2022 will feature five main events and categories: track and field, archery, dance sports, futsal, and e-sports. Track and field will include running, jumping, throwing, and relay races. Archery will involve shooting arrows at a target from a distance. Dance sports will consist of ballroom dancing styles such as cha-cha-cha, rumba, jive, paso doble, and samba. Futsal will be a modified version of soccer played on a smaller field with five players on each team. E-sports will be a new event added for the first time this year, where idols will play popular video games such as League of Legends, PUBG Mobile, KartRider Rush+, and Among Us.</p>
- <h3>The benefits and controversies of ISAC</h3>
- <p>ISAC has many benefits for both idols and fans. For idols, it is a chance to showcase their athletic abilities, have fun with their fellow idols, and interact with their fans. For fans, it is a chance to see their favorite idols in a different setting, cheer for them, and enjoy their performances. ISAC also helps promote K-pop <p>However, ISAC also has some controversies and criticisms. Some of the common issues are the idols' safety, fairness, and scheduling. Some idols have suffered injuries or accidents during the events, such as sprains, fractures, or concussions. Some fans have complained about the unfairness or bias of the judges, referees, or staff. Some idols have expressed their exhaustion or stress due to the long hours of filming or the tight schedules.</p>
- <p>Despite these challenges, ISAC remains one of the most anticipated and watched programs among K-pop fans. It is a rare opportunity to see idols from different groups and genres come together and compete in a friendly and festive atmosphere.</p>
- <p>How to download ISAC 2022 Chuseok Special episodes<br />
- Watch ISAC 2022 online free with English subtitles<br />
- Download ISAC 2022 futsal highlights and full matches<br />
- ISAC 2022 dance sports videos download HD<br />
- Download ISAC 2022 archery competition clips<br />
- Watch ISAC 2022 e-sports tournament live stream<br />
- Download ISAC 2022 track and field events videos<br />
- ISAC 2022 lineup and schedule download PDF<br />
- Download ISAC 2022 behind the scenes and interviews<br />
- Watch ISAC 2022 opening ceremony and performances<br />
- Download ISAC 2022 best moments and funny moments<br />
- Watch ISAC 2022 awards ceremony and winners list<br />
- Download ISAC 2022 photos and wallpapers<br />
- Watch ISAC 2022 fan cams and fancams compilation<br />
- Download ISAC 2022 reaction videos and reviews<br />
- Watch ISAC 2022 idols interaction and friendship moments<br />
- Download ISAC 2022 MCs Jun Hyun Moo, Lee Hong Ki, and Dahyun clips<br />
- Watch ISAC 2022 idols cheering and supporting each other<br />
- Download ISAC 2022 idols injury and accident moments<br />
- Watch ISAC 2022 idols practice and training videos<br />
- Download ISAC 2022 idols fashion and style photos<br />
- Watch ISAC 2022 idols cute and funny expressions<br />
- Download ISAC 2022 idols singing and dancing videos<br />
- Watch ISAC 2022 idols playing games and having fun<br />
- Download ISAC 2022 idols fan service and fan meeting videos</p>
- <h2>How to download and watch ISAC 2022?</h2>
- <p>If you want to download and watch ISAC 2022, you have several options. Depending on your location, preference, and budget, you can choose the best way to enjoy the program.</p>
- <h3>The official broadcasting channels and platforms of ISAC</h3>
- <p>The official broadcasting channel of ISAC is MBC, which is a terrestrial TV network in Korea. You can watch ISAC live on MBC if you have access to Korean TV channels. You can also watch ISAC online on MBC's official website or app, which require registration and verification. However, these options may not be available or convenient for international fans.</p>
- <p>Another official platform of ISAC is WAVVE, which is a streaming service that offers various Korean content, including dramas, movies, variety shows, and music. You can watch ISAC live or on-demand on WAVVE with a subscription fee. WAVVE is available in Korea and some other countries, such as Thailand, Indonesia, Malaysia, Singapore, Taiwan, Hong Kong, and Macau.</p>
- <h3>The alternative ways to download and watch ISAC online</h3>
- <p>If you cannot access the official channels or platforms of ISAC, you can still download and watch ISAC online through some alternative ways. However, you should be careful and cautious when using these methods, as they may involve illegal or unauthorized sources.</p>
- <p>One of the alternative ways to download and watch ISAC online is to use torrent sites or file-sharing platforms. These sites or platforms allow users to upload and download various files, including videos, audios, subtitles, and images. You can search for ISAC files on these sites or platforms and download them to your device. However, you should be aware of the risks of malware, viruses, or phishing when using these sites or platforms. You should also respect the intellectual property rights of the creators and producers of ISAC.</p>
- <p>Another alternative way to download and watch ISAC online is to use streaming sites or apps. These sites or apps provide links to various online sources that stream ISAC live or on-demand. You can click on these links and watch ISAC on your browser or app. However, you should be aware of the quality, reliability, and security of these sites or apps. You should also avoid clicking on any pop-ups or ads that may appear on these sites or apps.</p>
- <h3>The tips and precautions for downloading and watching ISAC safely</h3>
- <p>If you decide to use any of the alternative ways to download and watch ISAC online, you should follow some tips and precautions to ensure your safety and enjoyment.</p>
- <p>First, you should use a VPN (virtual private network) service when accessing any site or platform that is not official or authorized by MBC or WAVVE. A VPN service can help you hide your IP address and location, encrypt your data, and bypass any geo-restrictions or censorship. This way, you can protect your privacy and security while downloading and watching ISAC online.</p>
- <p>Second, you should use a reputable antivirus software when downloading any file from any site or platform that is not official or authorized by MBC or WAVVE. An antivirus software can help you scan your device for any malware, <p>viruses, or phishing that may harm your device or steal your information. This way, you can prevent any damage or loss while downloading and watching ISAC online.</p>
- <p>Third, you should use a reliable media player when watching any file from any site or platform that is not official or authorized by MBC or WAVVE. A media player can help you play the file smoothly, adjust the quality, add subtitles, and control the speed. This way, you can enjoy the file without any interruption or inconvenience while watching ISAC online.</p>
- <h2>Who are the idols participating in ISAC 2022?</h2>
- <p>Now that you know how to download and watch ISAC 2022, you may be wondering who are the idols participating in ISAC 2022. ISAC 2022 will feature more than 200 idols from more than 50 groups and solo artists. Here are some of the idols who have confirmed their participation in ISAC 2022.</p>
- <h3>The confirmed lineup of idols for ISAC 2022</h3>
- <p>The confirmed lineup of idols for ISAC 2022 is as follows:</p>
- <table>
- <tr>
- <th>Event</th>
- <th>Idols</th>
- </tr>
- <tr>
- <td>Track and field</td>
- <td>BTS, TWICE, EXO, BLACKPINK, NCT, SEVENTEEN, ITZY, TXT, ENHYPEN, aespa, Stray Kids, ATEEZ, (G)I-DLE, MONSTA X, IZ*ONE, THE BOYZ, LOONA, EVERGLOW, CRAVITY, WEi, STAYC, WEKIMEKI, AB6IX, CIX, PENTAGON, SF9, ASTRO, OH MY GIRL, MOMOLAND, GOLDEN CHILD, VERIVERY</td>
- </tr>
- <tr>
- <td>Archery</td>
- <td>BTS, TWICE, EXO, BLACKPINK, NCT, SEVENTEEN, ITZY, TXT, ENHYPEN, aespa, Stray Kids, ATEEZ, (G)I-DLE, MONSTA X, IZ*ONE, THE BOYZ, LOONA, EVERGLOW</td>
- </tr>
- <tr>
- <td>Dance sports</td>
- <td>BTS's Jimin and J-Hope, TWICE's Momo and Sana, EXO's Kai and Sehun, BLACKPINK's Lisa and Rosé, NCT's Taeyong and Ten, SEVENTEEN's Hoshi and Dino, ITZY's Yeji and Chaeryeong, TXT's Yeonjun and Beomgyu, ENHYPEN's Sunoo and Jake, aespa's Karina and Giselle, Stray Kids' Hyunjin and Felix, ATEEZ's San and Wooyoung, (G)I-DLE's Soojin and Miyeon, MONSTA X's Shownu and Hyungwon, IZ*ONE's Chaeyeon and Yena, THE BOYZ's Q and Juyeon, LOONA's Heejin and Olivia Hye, EVERGLOW's Mia and Yiren</td>
- </tr>
- <tr>
- <td>Futsal</td>
- <td>BTS's Jin and Jungkook, TWICE's Nayeon and Dahyun, EXO's Chanyeol and Baekhyun, BLACKPINK's Jennie and Jisoo, NCT's Mark and Haechan, SEVENTEEN's S.Coups and Mingyu, ITZY's Lia and Ryujin, TXT's Soobin and Hueningkai, ENHYPEN's Heeseung and Jay, aespa's Ningning and Winter, Stray Kids' Bang Chan and Lee Know, ATEEZ's Hongjoong and Yunho, (G)I-DLE's Soyeon and Minnie, MONSTA X's Kihyun and Minhyuk, IZ*ONE's Sakura and Eunbi, THE BOYZ's Sangyeon and Younghoon, LOONA's Kim Lip and Chuu, EVERGLOW's Sihyeon and Onda</td>
- </tr>
- <tr>
- <td>E-sports</td>
- <td>BTS's RM and V, TWICE's Jihyo and Tzuyu, EXO's Suho and Chen, BLACKPINK's Jennie and Jisoo, NCT's Jaehyun and Doyoung, SEVENTEEN's Woozi and Vernon, ITZY's Yuna and Ryujin, TXT's Taehyun and Hueningkai, ENHYPEN's Sunghoon and Ni-ki, aespa's Karina and Giselle, Stray Kids' Changbin and I.N, ATEEZ's Seonghwa and Jongho, (G)I-DLE's Yuqi and Shuhua, MONSTA X's Jooheon and I.M, IZ*ONE's Wonyoung and Hyewon, THE BOYZ's Eric and New, LOONA's Yves and Gowon, EVERGLOW's Aisha and E:U</td>
- </tr>
- </table>
- <h3>The expected highlights and performances of ISAC 2022</h3>
- <p>ISAC 2022 is expected to be full of highlights and performances that will impress and entertain the fans. Some of the anticipated moments are:</p>
- <ul>
- <li>The debut of e-sports as a new event, where idols will show their gaming skills and strategies.</li>
- <li>The return of dance sports as a popular event, where idols will dazzle with their elegant and energetic moves.</li>
- <li>The fierce competition of archery as a fan-favorite event, where idols will aim for the bullseye with their accuracy and concentration.</li>
- <li>The exciting action of futsal as a thrilling event, where idols will score goals with their agility and teamwork.</li>
- <li>The record-breaking feats of track and field as a classic event, where idols will run, jump, throw, and relay with their speed, strength, and stamina.</li>
- </ul>
- <h3>The idols to watch out for in ISAC 2022</h3>
- <p>ISAC 2022 will feature many idols who have proven their skills and talents in previous ISACs or other programs. Some of the idols to watch out for are:</p>
- <ul>
- <li>BTS's Jungkook, who holds the record for the 400-meter dash and is known as the golden maknae for his all-around abilities.</li>
- <li>TWICE's Tzuyu, who scored a perfect 10 in archery and is known as the archery goddess for her beauty and grace.</li>
- <li>EXO's Kai, who won the dance sports event with his partner Sehun and is known as the dancing king for his charisma and skill.</li>
- <li>BLACKPINK's Lisa, who is a master of various video games and is known as the gaming queen for her intelligence and strategy.</li>
- <li>NCT's Taeyong, who scored a goal in futsal with his amazing dribbling and shooting skills and is known as the futsal ace for his passion and leadership.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>In conclusion, ISAC 2022 is a must-watch program for K-pop fans who want to see their favorite idols compete in various sports events. ISAC 2022 will feature more than 200 idols from more than 50 groups and solo artists, who will participate in five main events: track and field, archery, dance sports, futsal, and e-sports. You can download and watch ISAC 2022 online through various ways, such as the official channels or platforms of MBC or WAVVE, or the alternative sites or platforms that offer torrent or streaming services. However, you should be careful and cautious when using these methods, as they may involve illegal or unauthorized sources. You should also use a VPN service, an antivirus software, and a reliable media player to ensure your safety and enjoyment while downloading and watching ISAC online.</p>
- <p>If you are excited about ISAC 2022, you should mark your calendar for the airing dates. ISAC 2022 will air on MBC on February 11th and February 12th, 2022, at 5:50 PM KST. You can also watch it on WAVVE with a subscription fee. If you want to download and watch it online, you can use the methods we discussed above, but remember to be safe and respectful.</p>
- <p>We hope this article has helped you learn more about ISAC 2022 and how to download and watch it online. ISAC 2022 is a great way to celebrate the Lunar New Year with your favorite idols and enjoy their sportsmanship and entertainment. Don't miss this chance to see your idols shine in ISAC 2022!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about ISAC 2022:</p>
- <ol>
- <li>What is the full name of ISAC 2022?</li>
- <p>The full name of ISAC 2022 is Idol Star Athletics Championships - New Year Special 2022.</p>
- <li>How many episodes are there in ISAC 2022?</li>
- <p>There are two episodes in ISAC 2022, each lasting for about two hours.</p>
- <li>Who are the hosts of ISAC 2022?</li>
- <p>The hosts of ISAC 2022 are Jun Hyun-moo, Super Junior's Leeteuk, and Apink's Bomi.</p>
- <li>Who are the winners of ISAC 2021?</li>
- <p>The winners of ISAC 2021 were NCT (track and field), TWICE (archery), EXO (dance sports), SEVENTEEN (futsal), and BTS (e-sports).</p>
- <li>Where can I find more information about ISAC 2022?</li>
- <p>You can find more information about ISAC 2022 on MBC's official website or social media accounts, or on WAVVE's official website or app.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Enjoy Modern Combat 4 Zero Hour Mod APK with Full Features and No Ads.md DELETED
@@ -1,129 +0,0 @@
- <br />
- <h1>Modern Combat 4: Zero Hour Mod APK - The Ultimate Action Game for Android</h1>
- <p>If you are looking for an action-packed first-person shooter (FPS) game that will keep you on the edge of your seat, then you should try Modern Combat 4: Zero Hour. This game is one of the best FPS games for Android devices, with an engaging storyline, stunning graphics, and thrilling multiplayer mode. But what if you want to make it even better? Well, you can do that by downloading and installing Modern Combat 4: Zero Hour Mod APK, which is a modified version of the game that gives you unlimited money, unlocked features, and no ads. In this article, we will tell you everything you need to know about this modded version of the game, including how to download and install it, what are its features, and what are its pros and cons.</p>
- <h2>What is Modern Combat 4: Zero Hour?</h2>
- <p>Modern Combat 4: Zero Hour is a FPS game developed by Gameloft for Android and iOS devices. It is the fourth installment in the Modern Combat series, which is inspired by popular games like Call of Duty and Battlefield. The game has two main modes:</p>
- <h2>modern combat 4 zero hour mod apk</h2><br /><p><b><b>Download File</b> &#10004;&#10004;&#10004; <a href="https://jinyurl.com/2uNMMG">https://jinyurl.com/2uNMMG</a></b></p><br /><br />
- <h3>A thrilling FPS game with an immersive storyline and realistic graphics</h3>
- <p>In this mode, you play as a soldier who has to stop a global nuclear war that is triggered by a group of terrorists. You will have to fight your way through various locations around the world, such as Barcelona, Antarctica, Hawaii, and more. You will also have to face different enemies, such as soldiers, snipers, helicopters, tanks, and even drones. You will have access to a wide range of weapons, such as assault rifles, shotguns, pistols, grenades, rocket launchers, and more. You can also customize your weapons with attachments, such as scopes, silencers, magazines, etc. The game has realistic graphics that will make you feel like you are in the middle of a war zone. The game also has a cinematic soundtrack and voice acting that will immerse you in the story. The game has 12 missions that will take you around 5 hours to complete.</p>
- <h3>A multiplayer mode with various modes and maps</h3>
- <p>In this mode, you can play online with or against other players from around the world. You can choose from different modes, such as Team Deathmatch, Capture the Flag, Free for All, Zone Control, and more. You can also choose from different maps, such as Rooftops, Paradise, Blockbuster, and more. You can also create your own custom matches with your own rules and settings. You can also join or create clans and chat with other players. The game has a ranking system that will reward you with experience points, medals, and badges as you play. You can also unlock new weapons, skills, and perks as you level up.</p>
- <h3>A modded version with unlimited money and unlocked features</h3>
- <p>This is where Modern Combat 4: Zero Hour Mod APK comes in. This is a modified version of the game that gives you some extra benefits that are not available in the original version. These include:</p>
- <ul>
- <li>Unlimited money to buy weapons, armor, and upgrades</li>
- <li>Unlocked all levels, modes, and characters</li>
- <li>No ads, no root, no virus</li>
- </ul>
- <p>With these features, you can enjoy the game without any limitations or interruptions. You can have more fun and excitement with more options and customization. You can also save your time and effort by not having to grind or pay for anything.</p>
- <h2>How to download and install Modern Combat 4: Zero Hour Mod APK?</h2>
- <p>If you want to try Modern Combat 4: Zero Hour Mod APK, you will need to follow these simple steps:</p>
- <h3>Download the APK and OBB files from a trusted source</h3>
- <p>The first thing you need to do is to download the APK and OBB files of the modded version of the game from a reliable source. You can find many websites that offer these files, but be careful as some of them may contain malware or viruses that can harm your device. We recommend you to use [this link] to download the files safely and securely.</p>
- <p>modern combat 4 zero hour mod apk unlimited money<br />
- modern combat 4 zero hour mod apk offline<br />
- modern combat 4 zero hour mod apk latest version<br />
- modern combat 4 zero hour mod apk obb<br />
- modern combat 4 zero hour mod apk revdl<br />
- modern combat 4 zero hour mod apk android 1<br />
- modern combat 4 zero hour mod apk rexdl<br />
- modern combat 4 zero hour mod apk highly compressed<br />
- modern combat 4 zero hour mod apk andropalace<br />
- modern combat 4 zero hour mod apk data download<br />
- modern combat 4 zero hour mod apk all devices<br />
- modern combat 4 zero hour mod apk free shopping<br />
- modern combat 4 zero hour mod apk no root<br />
- modern combat 4 zero hour mod apk unlimited everything<br />
- modern combat 4 zero hour mod apk for pc<br />
- modern combat 4 zero hour mod apk full unlocked<br />
- modern combat 4 zero hour mod apk mega<br />
- modern combat 4 zero hour mod apk hack download<br />
- modern combat 4 zero hour mod apk pure<br />
- modern combat 4 zero hour mod apk mirror<br />
- modern combat 4 zero hour mod apk apkpure<br />
- modern combat 4 zero hour mod apk android oyun club<br />
- modern combat 4 zero hour mod apk + data (unlimited money)<br />
- modern combat 4 zero hour mod apk + data (offline)<br />
- modern combat 4 zero hour mod apk + data (latest)<br />
- modern combat 4 zero hour mod apk + data (obb)<br />
- modern combat 4 zero hour mod apk + data (revdl)<br />
- modern combat 4 zero hour mod apk + data (rexdl)<br />
- modern combat 4 zero hour mod apk + data (highly compressed)<br />
- modern combat 4 zero hour mod apk + data (andropalace)<br />
- modern combat 4 zero hour mod apk download for android<br />
- modern combat 4 zero hour mod apk download offline<br />
- modern combat 4 zero hour mod apk download latest version<br />
- modern combat 4 zero hour mod apk download obb<br />
- modern combat 4 zero hour mod apk download revdl<br />
- modern combat 4 zero hour mod apk download rexdl<br />
- modern combat 4 zero hour mod apk download highly compressed<br />
- modern combat 4 zero hour mod apk download andropalace<br />
- modern combat 4 zero hour hack/mod (unlimited money) for android free download<br />
- how to install modern combat 4 zero hour mod apk on android device<br />
- how to play modern combat 4 zero hour offline with mod apk<br />
- how to update modern combat 4 zero hour to latest version with mod apk<br />
- how to fix obb error in modern combat 4 zero hour with mod apk<br />
- how to get free shopping in modern combat 4 zero hour with mod apk<br />
- how to root your device for playing modern combat 4 zero hour with mod apk<br />
- how to unlock all weapons in modern combat 4 zero hour with mod apk<br />
- how to hack/mod your own version of modern combat 4 zero hour with apk editor pro</p>
- <h3>Enable unknown sources on your device settings</h3>
- <p>The next thing you need to do is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings > security > unknown sources > enable.</p>
- <h3>Install the APK file and extract the OBB file to the Android/obb folder</h3>
- <p>The third thing you need to do is to install the APK file and extract the OBB file to the Android/obb folder on your device storage. To do this, locate the downloaded APK file on your file manager and tap on it to install it. Then, locate the downloaded OBB file on your file manager and extract it using a ZIP extractor app. You should get a folder named com.gameloft.android.ANMP.GloftM4HM. Move this folder to the Android/obb folder on your device storage.</p>
- <h3>Launch the game and enjoy</h3>
- <p>The final thing you need to do is to launch the game and enjoy it. You should see a mod menu on the screen where you can enable or disable the mod features as you wish. You can also access all the levels, modes, and characters without any restrictions. You can also buy any weapons, armor, or upgrades with unlimited money.</p>
- <h2>What are the features of Modern Combat 4: Zero Hour Mod APK?</h2>
- <p>As we mentioned earlier, Modern Combat 4: Zero Hour Mod APK has some amazing features that make it better than the original version of the game. These include:</p>
- <h3>Unlimited money to buy weapons, armor, and upgrades</h3>
- <p>One of the main features of this modded version of the game is that it gives you unlimited money to buy anything you want in the game. You can buy any weapons, armor, or upgrades that suit your style and preference. You can also customize your weapons with attachments, such as scopes, silencers, magazines, etc. You don't have to worry about running out of money or spending real money on in-app purchases.</p>
- <h3>Unlocked all levels, modes, and characters</h3>
- <p>Another feature of this modded version of the game is that it unlocks all the levels, modes, and characters in the game. You can access all the 12 missions in the single-player mode without having to complete them in order. You can also choose from any of the modes and maps in the multiplayer mode without having to unlock them. You can also play as any of the characters in the game, such as Edward Page, Joel Blake, James Walker, and more. You can enjoy the full content of the game without any limitations.</p>
- <h3>No ads, no root, no virus</h3>
- <p>The last feature of this modded version of the game is that it has no ads, no root, and no virus. You don't have to see any annoying ads that pop up on your screen or interrupt your gameplay. You don't have to root your device or risk damaging it to install this modded version of the game. You don't have to worry about any malware or viruses that can infect your device or steal your data. You can play this modded version of the game safely and securely.</p>
- <h2>What are the pros and cons of Modern Combat 4: Zero Hour Mod APK?</h2>
- <p>As with any modded version of a game, Modern Combat 4: Zero Hour Mod APK has its own advantages and disadvantages. Here are some of them:</p>
- <h3>Pros</h3>
- <ul>
- <li>Enhanced gameplay experience with more options and customization</li>
- <p>One of the pros of this modded version of the game is that it enhances your gameplay experience with more options and customization. You can have more fun and excitement with more weapons, armor, upgrades, levels, modes, and characters. You can also customize your weapons with attachments, such as scopes, silencers, magazines, etc. You can also adjust the difficulty level and the graphics quality according to your preference. You can have a better gaming experience than the original version.</p>
- <li>Free to download and play without any restrictions</li>
- <p>Another pro of this modded version of the game is that it is free to download and play without any restrictions. You don't have to pay anything to download or install this modded version of the game. You don't have to spend any real money on in-app purchases or subscriptions. You don't have to complete any surveys or offers to access the mod features. You can play this modded version of the game without any cost or hassle.</p>
- <li>Compatible with most Android devices and versions</li>
- <p>The last pro of this modded version of the game is that it is compatible with most Android devices and versions. You don't need a high-end device or a latest Android version to play this modded version of the game. You can play it on any Android device that has at least 2 GB of RAM and Android 4.0 or higher. You can also play it on devices that are not supported by the original version of the game.</p>
- </ul>
- <h3>Cons</h3>
- <ul>
- <li>May not be compatible with some online features or servers</li>
- <p>One of the cons of this modded version of the game is that it may not be compatible with some online features or servers. You may not be able to play online with other players who are using the original version of the game. You may also face some issues with connecting to some servers or modes in the multiplayer mode. You may also get banned or blocked by some servers or players for using a modded version of the game.</p>
- <li>May cause some glitches or bugs in the game</li>
- <p>Another con of this modded version of the game is that it may cause some glitches or bugs in the game. You may encounter some errors or crashes while playing this modded version of the game. You may also experience some lagging or freezing issues while playing this modded version of the game. You may also lose some data or progress while playing this modded version of the game.</p>
- <li>May violate the terms and conditions of the original game developer</li>
- <p>The last con of this modded version of the game is that it may violate the terms and conditions of the original game developer. You may be violating the intellectual property rights or the privacy policy of Gameloft, the developer of Modern Combat 4: Zero Hour. You may also be breaking the rules or the code of conduct of the game. You may face some legal consequences or penalties for using a modded version of the game.</p>
- <h2>Conclusion</h2>
- <p>Modern Combat 4: Zero Hour Mod APK is a great choice for action lovers who want to enjoy a high-quality FPS game on their Android devices. It offers unlimited money, unlocked features, and no ads, making it more fun and exciting than the original version. However, it also has some drawbacks, such as possible compatibility issues, glitches, or legal risks. Therefore, users should download and install it at their own discretion and responsibility.</p>
- <h2>FAQs</h2>
- <h3>Is Modern Combat 4: Zero Hour Mod APK safe to use?</h3>
- <p>Modern Combat 4: Zero Hour Mod APK is generally safe to use, as long as you download it from a trusted source and scan it with an antivirus app before installing it. However, there is no guarantee that it will not cause any harm to your device or data, so you should use it at your own risk.</p>
- <h3>How to update Modern Combat 4: Zero Hour Mod APK?</h3>
- <p>To update Modern Combat 4: Zero Hour Mod APK, you will need to download and install the latest version of the modded version of the game from the same source that you downloaded it from. You will also need to delete the old version of the game and its data before installing the new version. You may also need to backup your progress or data before updating the game.</p>
- <h3>How to fix Modern Combat 4: Zero Hour Mod APK not working?</h3>
- <p>If Modern Combat 4: Zero Hour Mod APK is not working on your device, you may try some of these solutions:</p>
- <ul>
- <li>Check your internet connection and make sure it is stable and fast.</li>
- <li>Clear your cache and data of the game and restart your device.</li>
- <li>Reinstall the game and its data from a trusted source.</li>
- <li>Change your device settings or permissions to allow the game to run properly.</li>
- <li>Contact the mod developer or the original game developer for support or assistance.</li>
- </ul>
- <h3>How to play Modern Combat 4: Zero Hour Mod APK online?</h3>
- <p>To play Modern Combat 4: Zero Hour Mod APK online, you will need to have a stable and fast internet connection and a valid account for the game. You will also need to make sure that you are using a compatible version of the modded version of the game with the online servers or features. You may also need to disable some of the mod features that may interfere with the online gameplay.</p>
- <h3>How to uninstall Modern Combat 4: Zero Hour Mod APK?</h3>
- <p>To uninstall Modern Combat 4: Zero Hour Mod APK, you will need to follow these steps:</p>
- <ol>
- <li>Go to your device settings > apps > Modern Combat 4: Zero Hour > uninstall.</li>
- <li>Delete the com.gameloft.android.ANMP.GloftM4HM folder from your Android/obb folder on your device storage.</li>
- <li>Delete any other files or folders related to the game from your device storage.</li>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1toTree/lora_test/ppdiffusers/experimental/README.md DELETED
@@ -1,6 +0,0 @@
- # 🧨 PPDiffusers Experimental
-
- To open up more application scenarios for the **PPDiffusers library**, we have added some **experimental code** here.
-
- Currently the following scenarios are supported:
- * Reinforcement learning via an implementation of the [PPDiffuser](https://arxiv.org/abs/2205.09991) model.
spaces/2023Liu2023/bingo/src/components/tailwind-indicator.tsx DELETED
@@ -1,14 +0,0 @@
- export function TailwindIndicator() {
-   if (process.env.NODE_ENV === 'production') return null
-
-   return (
-     <div className="fixed bottom-1 left-1 z-50 flex h-6 w-6 items-center justify-center rounded-full bg-gray-800 p-3 font-mono text-xs text-white">
-       <div className="block sm:hidden">xs</div>
-       <div className="hidden sm:block md:hidden">sm</div>
-       <div className="hidden md:block lg:hidden">md</div>
-       <div className="hidden lg:block xl:hidden">lg</div>
-       <div className="hidden xl:block 2xl:hidden">xl</div>
-       <div className="hidden 2xl:block">2xl</div>
-     </div>
-   )
- }
spaces/4Taps/SadTalker/src/face3d/options/__init__.py DELETED
@@ -1 +0,0 @@
- """This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
spaces/AI-Hobbyist/Hoyo-RVC/Dockerfile DELETED
@@ -1,13 +0,0 @@
- # syntax=docker/dockerfile:1
-
- FROM python:3.10-bullseye
-
- EXPOSE 7865
-
- WORKDIR /app
-
- COPY . .
-
- RUN pip3 install -r requirements.txt
-
- CMD ["python3", "infer-web.py"]
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/multi_window_disc.py DELETED
@@ -1,136 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
-
5
-
6
- class SingleWindowDisc(nn.Module):
7
- def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128):
8
- super().__init__()
9
- padding = (kernel[0] // 2, kernel[1] // 2)
10
- self.model = nn.ModuleList([
11
- nn.Sequential(*[
12
- nn.Conv2d(c_in, hidden_size, kernel, (2, 2), padding),
13
- nn.LeakyReLU(0.2, inplace=True),
14
- nn.Dropout2d(0.25),
15
- nn.BatchNorm2d(hidden_size, 0.8)
16
- ]),
17
- nn.Sequential(*[
18
- nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding),
19
                nn.LeakyReLU(0.2, inplace=True),
                nn.Dropout2d(0.25),
                nn.BatchNorm2d(hidden_size, 0.8)
            ]),
            nn.Sequential(*[
                nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Dropout2d(0.25),
            ]),
        ])
        ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3)
        self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1)

    def forward(self, x):
        """
        :param x: [B, C, T, n_bins]
        :return: validity: [B, 1], h: list of hidden feature maps
        """
        h = []
        for l in self.model:
            x = l(x)
            h.append(x)
        x = x.view(x.shape[0], -1)
        validity = self.adv_layer(x)  # [B, 1]
        return validity, h


class MultiWindowDiscriminator(nn.Module):
    def __init__(self, time_lengths, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128):
        super(MultiWindowDiscriminator, self).__init__()
        self.win_lengths = time_lengths
        self.discriminators = nn.ModuleList()

        for time_length in time_lengths:
            self.discriminators += [SingleWindowDisc(time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size)]

    def forward(self, x, x_len, start_frames_wins=None):
        '''
        Args:
            x (tensor): input mel, (B, c_in, T, n_bins).
            x_len (tensor): length of each mel, (B,).

        Returns:
            tensor: (B,).
        '''
        validity = []
        if start_frames_wins is None:
            start_frames_wins = [None] * len(self.discriminators)
        h = []
        for i, start_frames in zip(range(len(self.discriminators)), start_frames_wins):
            x_clip, start_frames = self.clip(x, x_len, self.win_lengths[i], start_frames)  # (B, c_in, win_length, n_bins)
            start_frames_wins[i] = start_frames
            if x_clip is None:
                continue
            x_clip, h_ = self.discriminators[i](x_clip)
            h += h_
            validity.append(x_clip)
        if len(validity) != len(self.discriminators):
            return None, start_frames_wins, h
        validity = sum(validity)  # [B]
        return validity, start_frames_wins, h

    def clip(self, x, x_len, win_length, start_frames=None):
        '''Randomly clip x to win_length.
        Args:
            x (tensor): (B, c_in, T, n_bins).
            x_len (tensor): (B,).
            win_length (int): target clip length.

        Returns:
            (tensor): (B, c_in, win_length, n_bins).
        '''
        T_start = 0
        T_end = x_len.max() - win_length
        if T_end < 0:
            # Sequences are shorter than this window; return two values so the
            # caller's "x_clip, start_frames = ..." unpacking does not break.
            return None, start_frames
        T_end = T_end.item()
        if start_frames is None:
            start_frame = np.random.randint(low=T_start, high=T_end + 1)
            start_frames = [start_frame] * x.size(0)
        else:
            start_frame = start_frames[0]
        x_batch = x[:, :, start_frame: start_frame + win_length]
        return x_batch, start_frames


class Discriminator(nn.Module):
    def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1,
                 hidden_size=128):
        super(Discriminator, self).__init__()
        self.time_lengths = time_lengths
        self.discriminator = MultiWindowDiscriminator(
            freq_length=freq_length,
            time_lengths=time_lengths,
            kernel=kernel,
            c_in=c_in, hidden_size=hidden_size
        )

    def forward(self, x, start_frames_wins=None):
        """
        :param x: [B, T, 80]
        :return: dict with the summed validity score, hidden feature maps and
            the window start frames (so a paired call can reuse the same clips)
        """
        if len(x.shape) == 3:
            x = x[:, None, :, :]  # [B, 1, T, 80]
        x_len = x.sum([1, -1]).ne(0).int().sum([-1])
        ret = {'y_c': None, 'y': None}
        ret['y'], start_frames_wins, ret['h'] = self.discriminator(
            x, x_len, start_frames_wins=start_frames_wins)

        ret['start_frames_wins'] = start_frames_wins
        return ret
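
A quick smoke test of the full discriminator on a batch of mel spectrograms; a minimal sketch, with shapes following the docstrings above (the batch size and frame count are arbitrary):

import torch

disc = Discriminator(time_lengths=[32, 64, 128], freq_length=80)
mels = torch.randn(4, 200, 80)   # [B, T, n_bins]
out = disc(mels)
print(out['y'].shape)            # [4, 1]: summed validity over all windows
print(len(out['h']))             # hidden feature maps from every window discriminator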
 
spaces/AIGC-Audio/AudioGPT/audio_detection/audio_infer/utils/data_generator.py DELETED
@@ -1,421 +0,0 @@
import numpy as np
import h5py
import csv
import time
import logging

from utilities import int16_to_float32


def read_black_list(black_list_csv):
    """Read audio names from black list.
    """
    with open(black_list_csv, 'r') as fr:
        reader = csv.reader(fr)
        lines = list(reader)

    black_list_names = ['Y{}.wav'.format(line[0]) for line in lines]
    return black_list_names


class AudioSetDataset(object):
    def __init__(self, sample_rate=32000):
        """This class takes the meta of an audio clip as input, and returns
        the waveform and target of the audio clip. This class is used by DataLoader.
        """
        self.sample_rate = sample_rate

    def __getitem__(self, meta):
        """Load waveform and target of an audio clip.

        Args:
            meta: {
                'hdf5_path': str,
                'index_in_hdf5': int}

        Returns:
            data_dict: {
                'audio_name': str,
                'waveform': (clip_samples,),
                'target': (classes_num,)}
        """
        hdf5_path = meta['hdf5_path']
        index_in_hdf5 = meta['index_in_hdf5']
        with h5py.File(hdf5_path, 'r') as hf:
            audio_name = hf['audio_name'][index_in_hdf5].decode()
            waveform = int16_to_float32(hf['waveform'][index_in_hdf5])
            waveform = self.resample(waveform)
            target = hf['target'][index_in_hdf5].astype(np.float32)

        data_dict = {
            'audio_name': audio_name, 'waveform': waveform, 'target': target}

        return data_dict

    def resample(self, waveform):
        """Resample by simple decimation (source waveforms are stored at 32 kHz).

        Args:
            waveform: (clip_samples,)

        Returns:
            (resampled_clip_samples,)
        """
        if self.sample_rate == 32000:
            return waveform
        elif self.sample_rate == 16000:
            return waveform[0 :: 2]
        elif self.sample_rate == 8000:
            return waveform[0 :: 4]
        else:
            raise Exception('Incorrect sample rate!')


class Base(object):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv, random_seed):
        """Base class of train sampler.

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        self.batch_size = batch_size
        self.random_state = np.random.RandomState(random_seed)

        # Black list
        if black_list_csv:
            self.black_list_names = read_black_list(black_list_csv)
        else:
            self.black_list_names = []

        logging.info('Black list samples: {}'.format(len(self.black_list_names)))

        # Load target
        load_time = time.time()

        with h5py.File(indexes_hdf5_path, 'r') as hf:
            self.audio_names = [audio_name.decode() for audio_name in hf['audio_name'][:]]
            self.hdf5_paths = [hdf5_path.decode() for hdf5_path in hf['hdf5_path'][:]]
            self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
            self.targets = hf['target'][:].astype(np.float32)

        (self.audios_num, self.classes_num) = self.targets.shape
        logging.info('Training number: {}'.format(self.audios_num))
        logging.info('Load target time: {:.3f} s'.format(time.time() - load_time))


class TrainSampler(Base):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
                 random_seed=1234):
        """Plain (unbalanced) sampler. Generate batch meta for training.

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        super(TrainSampler, self).__init__(indexes_hdf5_path, batch_size,
                                           black_list_csv, random_seed)

        self.indexes = np.arange(self.audios_num)

        # Shuffle indexes
        self.random_state.shuffle(self.indexes)

        self.pointer = 0

    def __iter__(self):
        """Generate batch meta for training.

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string, 'index_in_hdf5': int},
                ...]
        """
        batch_size = self.batch_size

        while True:
            batch_meta = []
            i = 0
            while i < batch_size:
                index = self.indexes[self.pointer]
                self.pointer += 1

                # Shuffle indexes and reset pointer
                if self.pointer >= self.audios_num:
                    self.pointer = 0
                    self.random_state.shuffle(self.indexes)

                # If audio in black list then continue
                if self.audio_names[index] in self.black_list_names:
                    continue
                else:
                    batch_meta.append({
                        'hdf5_path': self.hdf5_paths[index],
                        'index_in_hdf5': self.indexes_in_hdf5[index]})
                    i += 1

            yield batch_meta

    def state_dict(self):
        state = {
            'indexes': self.indexes,
            'pointer': self.pointer}
        return state

    def load_state_dict(self, state):
        self.indexes = state['indexes']
        self.pointer = state['pointer']


class BalancedTrainSampler(Base):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
                 random_seed=1234):
        """Balanced sampler. Generate batch meta for training. Data are equally
        sampled from different sound classes.

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        super(BalancedTrainSampler, self).__init__(indexes_hdf5_path,
                                                   batch_size, black_list_csv, random_seed)

        self.samples_num_per_class = np.sum(self.targets, axis=0)
        logging.info('samples_num_per_class: {}'.format(
            self.samples_num_per_class.astype(np.int32)))

        # Training indexes of all sound classes. E.g.:
        # [[0, 11, 12, ...], [3, 4, 15, 16, ...], [7, 8, ...], ...]
        self.indexes_per_class = []

        for k in range(self.classes_num):
            self.indexes_per_class.append(
                np.where(self.targets[:, k] == 1)[0])

        # Shuffle indexes
        for k in range(self.classes_num):
            self.random_state.shuffle(self.indexes_per_class[k])

        self.queue = []
        self.pointers_of_classes = [0] * self.classes_num

    def expand_queue(self, queue):
        classes_set = np.arange(self.classes_num).tolist()
        self.random_state.shuffle(classes_set)
        queue += classes_set
        return queue

    def __iter__(self):
        """Generate batch meta for training.

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string, 'index_in_hdf5': int},
                ...]
        """
        batch_size = self.batch_size

        while True:
            batch_meta = []
            i = 0
            while i < batch_size:
                if len(self.queue) == 0:
                    self.queue = self.expand_queue(self.queue)

                class_id = self.queue.pop(0)
                pointer = self.pointers_of_classes[class_id]
                self.pointers_of_classes[class_id] += 1
                index = self.indexes_per_class[class_id][pointer]

                # When finishing one epoch of a sound class, shuffle its indexes and reset the pointer
                if self.pointers_of_classes[class_id] >= self.samples_num_per_class[class_id]:
                    self.pointers_of_classes[class_id] = 0
                    self.random_state.shuffle(self.indexes_per_class[class_id])

                # If audio in black list then continue
                if self.audio_names[index] in self.black_list_names:
                    continue
                else:
                    batch_meta.append({
                        'hdf5_path': self.hdf5_paths[index],
                        'index_in_hdf5': self.indexes_in_hdf5[index]})
                    i += 1

            yield batch_meta

    def state_dict(self):
        state = {
            'indexes_per_class': self.indexes_per_class,
            'queue': self.queue,
            'pointers_of_classes': self.pointers_of_classes}
        return state

    def load_state_dict(self, state):
        self.indexes_per_class = state['indexes_per_class']
        self.queue = state['queue']
        self.pointers_of_classes = state['pointers_of_classes']


class AlternateTrainSampler(Base):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
                 random_seed=1234):
        """AlternateTrainSampler is a combination of TrainSampler and
        BalancedTrainSampler. It alternately samples batches from the two.

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        self.sampler1 = TrainSampler(indexes_hdf5_path, batch_size,
                                     black_list_csv, random_seed)

        self.sampler2 = BalancedTrainSampler(indexes_hdf5_path, batch_size,
                                             black_list_csv, random_seed)

        self.batch_size = batch_size
        self.count = 0

    def __iter__(self):
        """Generate batch meta for training.

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string, 'index_in_hdf5': int},
                ...]
        """
        batch_size = self.batch_size

        while True:
            self.count += 1

            if self.count % 2 == 0:
                batch_meta = []
                i = 0
                while i < batch_size:
                    index = self.sampler1.indexes[self.sampler1.pointer]
                    self.sampler1.pointer += 1

                    # Shuffle indexes and reset pointer
                    if self.sampler1.pointer >= self.sampler1.audios_num:
                        self.sampler1.pointer = 0
                        self.sampler1.random_state.shuffle(self.sampler1.indexes)

                    # If audio in black list then continue
                    if self.sampler1.audio_names[index] in self.sampler1.black_list_names:
                        continue
                    else:
                        batch_meta.append({
                            'hdf5_path': self.sampler1.hdf5_paths[index],
                            'index_in_hdf5': self.sampler1.indexes_in_hdf5[index]})
                        i += 1

            elif self.count % 2 == 1:
                batch_meta = []
                i = 0
                while i < batch_size:
                    if len(self.sampler2.queue) == 0:
                        self.sampler2.queue = self.sampler2.expand_queue(self.sampler2.queue)

                    class_id = self.sampler2.queue.pop(0)
                    pointer = self.sampler2.pointers_of_classes[class_id]
                    self.sampler2.pointers_of_classes[class_id] += 1
                    index = self.sampler2.indexes_per_class[class_id][pointer]

                    # When finishing one epoch of a sound class, shuffle its indexes and reset the pointer
                    if self.sampler2.pointers_of_classes[class_id] >= self.sampler2.samples_num_per_class[class_id]:
                        self.sampler2.pointers_of_classes[class_id] = 0
                        self.sampler2.random_state.shuffle(self.sampler2.indexes_per_class[class_id])

                    # If audio in black list then continue
                    if self.sampler2.audio_names[index] in self.sampler2.black_list_names:
                        continue
                    else:
                        batch_meta.append({
                            'hdf5_path': self.sampler2.hdf5_paths[index],
                            'index_in_hdf5': self.sampler2.indexes_in_hdf5[index]})
                        i += 1

            yield batch_meta

    def state_dict(self):
        state = {
            'sampler1': self.sampler1.state_dict(),
            'sampler2': self.sampler2.state_dict()}
        return state

    def load_state_dict(self, state):
        self.sampler1.load_state_dict(state['sampler1'])
        self.sampler2.load_state_dict(state['sampler2'])


class EvaluateSampler(object):
    def __init__(self, indexes_hdf5_path, batch_size):
        """Evaluate sampler. Generate batch meta for evaluation.

        Args:
            indexes_hdf5_path: string
            batch_size: int
        """
        self.batch_size = batch_size

        with h5py.File(indexes_hdf5_path, 'r') as hf:
            self.audio_names = [audio_name.decode() for audio_name in hf['audio_name'][:]]
            self.hdf5_paths = [hdf5_path.decode() for hdf5_path in hf['hdf5_path'][:]]
            self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
            self.targets = hf['target'][:].astype(np.float32)

        self.audios_num = len(self.audio_names)

    def __iter__(self):
        """Generate batch meta for evaluation.

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string,
                 'index_in_hdf5': int}
                ...]
        """
        batch_size = self.batch_size
        pointer = 0

        while pointer < self.audios_num:
            batch_indexes = np.arange(pointer,
                                      min(pointer + batch_size, self.audios_num))

            batch_meta = []

            for index in batch_indexes:
                batch_meta.append({
                    'audio_name': self.audio_names[index],
                    'hdf5_path': self.hdf5_paths[index],
                    'index_in_hdf5': self.indexes_in_hdf5[index],
                    'target': self.targets[index]})

            pointer += batch_size
            yield batch_meta


def collate_fn(list_data_dict):
    """Collate data.
    Args:
        list_data_dict, e.g., [{'audio_name': str, 'waveform': (clip_samples,), ...},
                               {'audio_name': str, 'waveform': (clip_samples,), ...},
                               ...]
    Returns:
        np_data_dict, dict, e.g.,
            {'audio_name': (batch_size,), 'waveform': (batch_size, clip_samples), ...}
    """
    np_data_dict = {}

    for key in list_data_dict[0].keys():
        np_data_dict[key] = np.array([data_dict[key] for data_dict in list_data_dict])

    return np_data_dict
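
Taken together, the dataset, sampler and collate function plug into a standard PyTorch DataLoader: the sampler yields lists of meta dicts, and each meta dict is passed to the dataset as a key. A minimal sketch, assuming an indexes HDF5 file laid out as `Base` expects (the file path here is hypothetical):

import torch.utils.data

dataset = AudioSetDataset(sample_rate=32000)
sampler = BalancedTrainSampler(
    indexes_hdf5_path='workspace/indexes/balanced_train.h5',  # hypothetical path
    batch_size=32)

loader = torch.utils.data.DataLoader(
    dataset=dataset,
    batch_sampler=sampler,   # each yielded batch_meta is a list of meta dicts
    collate_fn=collate_fn,
    num_workers=4)

for batch in loader:
    # collate_fn stacks fields into numpy arrays, not tensors
    print(batch['waveform'].shape)  # (32, clip_samples)
    break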
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/audio/Factory.js DELETED
@@ -1,13 +0,0 @@
import Audio from './Audio.js';
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('audio', function (config) {
    var gameObject = new Audio(this.scene, config);
    this.scene.add.existing(gameObject);
    return gameObject;
});

SetValue(window, 'RexPlugins.Spinner.Audio', Audio);

export default Audio;
 
spaces/Ahmedmewloud/Depplearnig/README.md DELETED
@@ -1,12 +0,0 @@
---
title: Depplearnig
emoji: 🏢
colorFrom: indigo
colorTo: yellow
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AixiaGreyatt/QQsign/README.md DELETED
@@ -1,10 +0,0 @@
---
title: QQsign
emoji: 🦀
colorFrom: blue
colorTo: pink
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Aki004/herta-so-vits/modules/losses.py DELETED
@@ -1,61 +0,0 @@
import torch
from torch.nn import functional as F

import modules.commons as commons


def feature_loss(fmap_r, fmap_g):
    # L1 feature-matching loss between real and generated discriminator feature maps.
    loss = 0
    for dr, dg in zip(fmap_r, fmap_g):
        for rl, gl in zip(dr, dg):
            rl = rl.float().detach()
            gl = gl.float()
            loss += torch.mean(torch.abs(rl - gl))

    return loss * 2


def discriminator_loss(disc_real_outputs, disc_generated_outputs):
    # Least-squares GAN discriminator loss: real outputs pushed towards 1, fakes towards 0.
    loss = 0
    r_losses = []
    g_losses = []
    for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
        dr = dr.float()
        dg = dg.float()
        r_loss = torch.mean((1 - dr) ** 2)
        g_loss = torch.mean(dg ** 2)
        loss += (r_loss + g_loss)
        r_losses.append(r_loss.item())
        g_losses.append(g_loss.item())

    return loss, r_losses, g_losses


def generator_loss(disc_outputs):
    # Least-squares GAN generator loss: fake outputs pushed towards 1.
    loss = 0
    gen_losses = []
    for dg in disc_outputs:
        dg = dg.float()
        l = torch.mean((1 - dg) ** 2)
        gen_losses.append(l)
        loss += l

    return loss, gen_losses


def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):
    """KL divergence between the posterior q and the prior p, masked and
    averaged over valid positions.

    z_p, logs_q: [b, h, t_t]
    m_p, logs_p: [b, h, t_t]
    """
    z_p = z_p.float()
    logs_q = logs_q.float()
    m_p = m_p.float()
    logs_p = logs_p.float()
    z_mask = z_mask.float()
    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2. * logs_p)
    kl = torch.sum(kl * z_mask)
    l = kl / torch.sum(z_mask)
    return l
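
As a quick sanity check, the LSGAN losses can be exercised on dummy tensors; a minimal sketch, with one score tensor per sub-discriminator and purely illustrative shapes:

import torch

d_real = [torch.rand(4, 1), torch.rand(4, 1)]  # "real" discriminator outputs
d_fake = [torch.rand(4, 1), torch.rand(4, 1)]  # "generated" discriminator outputs

d_loss, r_losses, g_losses = discriminator_loss(d_real, d_fake)
g_loss, gen_losses = generator_loss(d_fake)
print(d_loss.item(), g_loss.item())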
 
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py DELETED
@@ -1,196 +0,0 @@
import torch
from torch.nn import functional as F

import numpy as np


DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3


def piecewise_rational_quadratic_transform(inputs,
                                           unnormalized_widths,
                                           unnormalized_heights,
                                           unnormalized_derivatives,
                                           inverse=False,
                                           tails=None,
                                           tail_bound=1.,
                                           min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                           min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                           min_derivative=DEFAULT_MIN_DERIVATIVE):

    if tails is None:
        spline_fn = rational_quadratic_spline
        spline_kwargs = {}
    else:
        spline_fn = unconstrained_rational_quadratic_spline
        spline_kwargs = {
            'tails': tails,
            'tail_bound': tail_bound
        }

    outputs, logabsdet = spline_fn(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
        **spline_kwargs
    )
    return outputs, logabsdet


def searchsorted(bin_locations, inputs, eps=1e-6):
    bin_locations[..., -1] += eps
    return torch.sum(
        inputs[..., None] >= bin_locations,
        dim=-1
    ) - 1


def unconstrained_rational_quadratic_spline(inputs,
                                            unnormalized_widths,
                                            unnormalized_heights,
                                            unnormalized_derivatives,
                                            inverse=False,
                                            tails='linear',
                                            tail_bound=1.,
                                            min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                                            min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                                            min_derivative=DEFAULT_MIN_DERIVATIVE):
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == 'linear':
        # unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        unnormalized_derivatives_ = torch.zeros((1, 1, unnormalized_derivatives.size(2), unnormalized_derivatives.size(3) + 2))
        unnormalized_derivatives_[..., 1:-1] = unnormalized_derivatives
        unnormalized_derivatives = unnormalized_derivatives_
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError('{} tails are not implemented.'.format(tails))

    outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative
    )

    return outputs, logabsdet


def rational_quadratic_spline(inputs,
                              unnormalized_widths,
                              unnormalized_heights,
                              unnormalized_derivatives,
                              inverse=False,
                              left=0., right=1., bottom=0., top=1.,
                              min_bin_width=DEFAULT_MIN_BIN_WIDTH,
                              min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
                              min_derivative=DEFAULT_MIN_DERIVATIVE):
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError('Input to a transform is not within its domain')

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError('Minimal bin width too large for the number of bins')
    if min_bin_height * num_bins > 1.0:
        raise ValueError('Minimal bin height too large for the number of bins')

    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
    cumwidths = (right - left) * cumwidths + left
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        a = (((inputs - input_cumheights) * (input_derivatives
                                             + input_derivatives_plus_one
                                             - 2 * input_delta)
              + input_heights * (input_delta - input_derivatives)))
        b = (input_heights * input_derivatives
             - (inputs - input_cumheights) * (input_derivatives
                                              + input_derivatives_plus_one
                                              - 2 * input_delta))
        c = - input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - root).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, -logabsdet
    else:
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (input_delta * theta.pow(2)
                                     + input_derivatives * theta_one_minus_theta)
        denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
                                     * theta_one_minus_theta)
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
                                                     + 2 * input_delta * theta_one_minus_theta
                                                     + input_derivatives * (1 - theta).pow(2))
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
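
To see the spline transform end to end, it can be driven with random parameters; a minimal sketch, where the shapes follow the ONNX-friendly `unconstrained` path above (which hardcodes a `(1, 1, T, ...)` derivative layout, so batch and channel are both 1, and the interior derivative tensor has `num_bins - 1` knots):

import torch

b, c, t, num_bins = 1, 1, 6, 10
inputs = torch.rand(b, c, t) * 2 - 1           # inside [-tail_bound, tail_bound]
w = torch.randn(b, c, t, num_bins)
h = torch.randn(b, c, t, num_bins)
d = torch.randn(b, c, t, num_bins - 1)         # interior knot derivatives

out, logdet = piecewise_rational_quadratic_transform(
    inputs, w, h, d, inverse=False, tails='linear', tail_bound=1.0)
inv, inv_logdet = piecewise_rational_quadratic_transform(
    out, w, h, d, inverse=True, tails='linear', tail_bound=1.0)
print((inputs - inv).abs().max().item())       # ~0: the spline is invertible
print((logdet + inv_logdet).abs().max().item())  # ~0: log-determinants cancel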
 
spaces/Alpaca233/LangchainPDF/app.py DELETED
@@ -1,64 +0,0 @@
import gradio as gr

from langchain.document_loaders import PyMuPDFLoader  # for loading the pdf
from langchain.embeddings import OpenAIEmbeddings  # for creating embeddings
from langchain.vectorstores import Chroma  # for the vectorization part
from langchain.chains import ChatVectorDBChain  # for chatting with the pdf
from langchain.llms import OpenAI  # the LLM model we'll use (ChatGPT)


class Chat:
    def __init__(self, pdf, api_input):
        self.api = api_input
        loader = PyMuPDFLoader(pdf)
        pages = loader.load_and_split()

        embeddings = OpenAIEmbeddings(openai_api_key=self.api)
        vectordb = Chroma.from_documents(pages, embedding=embeddings, persist_directory=".")
        vectordb.persist()

        self.pdf_qa = ChatVectorDBChain.from_llm(OpenAI(temperature=0.9, model_name="gpt-3.5-turbo",
                                                        openai_api_key=self.api),
                                                 vectordb, return_source_documents=True)

    def question(self, query):
        # The "请使用中文回答" prefix asks the model to answer in Chinese.
        result = self.pdf_qa({"question": "请使用中文回答" + query, "chat_history": ""})
        print("Answer:")
        print(result["answer"])

        return result["answer"]


def analyse(pdf_file, api_input):
    print(pdf_file.name)
    session = Chat(pdf_file.name, api_input)
    return session, "文章分析完成"  # "analysis of the document is complete"


def ask_question(data, question):
    if not data:  # gr.State starts out as None before a PDF has been analysed
        return "Please upload PDF file first!"
    return data.question(question)


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # ChatPDF based on Langchain
        """)
    data = gr.State()
    with gr.Tab("Upload PDF File"):
        pdf_input = gr.File(label="PDF File")
        api_input = gr.Textbox(label="OpenAI API Key")
        result = gr.Textbox()
        upload_button = gr.Button("Start Analyse")
        question_input = gr.Textbox(label="Your Question", placeholder="Authors of this paper?")
        answer = gr.Textbox(label="Answer")
        ask_button = gr.Button("Ask")

    upload_button.click(fn=analyse, inputs=[pdf_input, api_input], outputs=[data, result])
    ask_button.click(ask_question, inputs=[data, question_input], outputs=answer)

if __name__ == "__main__":
    demo.title = "ChatPDF Based on Langchain"
    demo.launch()
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/regnet.py DELETED
@@ -1,325 +0,0 @@
import numpy as np
import torch.nn as nn
from mmcv.cnn import build_conv_layer, build_norm_layer

from ..builder import BACKBONES
from .resnet import ResNet
from .resnext import Bottleneck


@BACKBONES.register_module()
class RegNet(ResNet):
    """RegNet backbone.

    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .

    Args:
        arch (dict): The parameter of RegNets.

            - w0 (int): initial width
            - wa (float): slope of width
            - wm (float): quantization parameter to quantize the width
            - depth (int): depth of the backbone
            - group_w (int): width of group
            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
        strides (Sequence[int]): Strides of the first block of each stage.
        base_channels (int): Base channels after stem layer.
        in_channels (int): Number of input image channels. Default: 3.
        dilations (Sequence[int]): Dilation of each stage.
        out_indices (Sequence[int]): Output from which stages.
        style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
            layer is the 3x3 conv layer, otherwise the stride-two layer is
            the first 1x1 conv layer.
        frozen_stages (int): Stages to be frozen (all param fixed). -1 means
            not freezing any parameters.
        norm_cfg (dict): dictionary to construct and config norm layer.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed.
        zero_init_residual (bool): whether to use zero init for last norm layer
            in resblocks to let them behave as identity.

    Example:
        >>> from mmdet.models import RegNet
        >>> import torch
        >>> self = RegNet(
        ...     arch=dict(
        ...         w0=88,
        ...         wa=26.31,
        ...         wm=2.25,
        ...         group_w=48,
        ...         depth=25,
        ...         bot_mul=1.0))
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 32, 32)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 96, 8, 8)
        (1, 192, 4, 4)
        (1, 432, 2, 2)
        (1, 1008, 1, 1)
    """
    arch_settings = {
        'regnetx_400mf':
        dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
        'regnetx_800mf':
        dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
        'regnetx_1.6gf':
        dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
        'regnetx_3.2gf':
        dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
        'regnetx_4.0gf':
        dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
        'regnetx_6.4gf':
        dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
        'regnetx_8.0gf':
        dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
        'regnetx_12gf':
        dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
    }

    def __init__(self,
                 arch,
                 in_channels=3,
                 stem_channels=32,
                 base_channels=32,
                 strides=(2, 2, 2, 2),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 style='pytorch',
                 deep_stem=False,
                 avg_down=False,
                 frozen_stages=-1,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN', requires_grad=True),
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNet, self).__init__()

        # Generate RegNet parameters first
        if isinstance(arch, str):
            assert arch in self.arch_settings, \
                f'"arch": "{arch}" is not one of the' \
                ' arch_settings'
            arch = self.arch_settings[arch]
        elif not isinstance(arch, dict):
            raise ValueError('Expect "arch" to be either a string '
                             f'or a dict, got {type(arch)}')

        widths, num_stages = self.generate_regnet(
            arch['w0'],
            arch['wa'],
            arch['wm'],
            arch['depth'],
        )
        # Convert to per stage format
        stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
        # Generate group widths and bot muls
        group_widths = [arch['group_w'] for _ in range(num_stages)]
        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
        # Adjust the compatibility of stage_widths and group_widths
        stage_widths, group_widths = self.adjust_width_group(
            stage_widths, self.bottleneck_ratio, group_widths)

        # Group params by stage
        self.stage_widths = stage_widths
        self.group_widths = group_widths
        self.depth = sum(stage_blocks)
        self.stem_channels = stem_channels
        self.base_channels = base_channels
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.strides = strides
        self.dilations = dilations
        assert len(strides) == len(dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.deep_stem = deep_stem
        self.avg_down = avg_down
        self.frozen_stages = frozen_stages
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.with_cp = with_cp
        self.norm_eval = norm_eval
        self.dcn = dcn
        self.stage_with_dcn = stage_with_dcn
        if dcn is not None:
            assert len(stage_with_dcn) == num_stages
        self.plugins = plugins
        self.zero_init_residual = zero_init_residual
        self.block = Bottleneck
        expansion_bak = self.block.expansion
        self.block.expansion = 1
        self.stage_blocks = stage_blocks[:num_stages]

        self._make_stem_layer(in_channels, stem_channels)

        self.inplanes = stem_channels
        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            stride = self.strides[i]
            dilation = self.dilations[i]
            group_width = self.group_widths[i]
            width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
            stage_groups = width // group_width

            dcn = self.dcn if self.stage_with_dcn[i] else None
            if self.plugins is not None:
                stage_plugins = self.make_stage_plugins(self.plugins, i)
            else:
                stage_plugins = None

            res_layer = self.make_res_layer(
                block=self.block,
                inplanes=self.inplanes,
                planes=self.stage_widths[i],
                num_blocks=num_blocks,
                stride=stride,
                dilation=dilation,
                style=self.style,
                avg_down=self.avg_down,
                with_cp=self.with_cp,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                dcn=dcn,
                plugins=stage_plugins,
                groups=stage_groups,
                base_width=group_width,
                base_channels=self.stage_widths[i])
            self.inplanes = self.stage_widths[i]
            layer_name = f'layer{i + 1}'
            self.add_module(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self._freeze_stages()

        self.feat_dim = stage_widths[-1]
        self.block.expansion = expansion_bak

    def _make_stem_layer(self, in_channels, base_channels):
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            base_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False)
        self.norm1_name, norm1 = build_norm_layer(
            self.norm_cfg, base_channels, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)

    def generate_regnet(self,
                        initial_width,
                        width_slope,
                        width_parameter,
                        depth,
                        divisor=8):
        """Generates per block width from RegNet parameters.

        Args:
            initial_width (int): Initial width of the backbone.
            width_slope (float): Slope of the quantized linear function.
            width_parameter (float): Parameter used to quantize the width.
            depth (int): Depth of the backbone.
            divisor (int, optional): The divisor of channels. Defaults to 8.

        Returns:
            list, int: return a list of widths of each stage and the number \
                of stages
        """
        assert width_slope >= 0
        assert initial_width > 0
        assert width_parameter > 1
        assert initial_width % divisor == 0
        widths_cont = np.arange(depth) * width_slope + initial_width
        ks = np.round(
            np.log(widths_cont / initial_width) / np.log(width_parameter))
        widths = initial_width * np.power(width_parameter, ks)
        widths = np.round(np.divide(widths, divisor)) * divisor
        num_stages = len(np.unique(widths))
        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
        return widths, num_stages

    @staticmethod
    def quantize_float(number, divisor):
        """Converts a float to the closest non-zero int divisible by divisor.

        Args:
            number (float): Original number to be quantized.
            divisor (int): Divisor used to quantize the number.

        Returns:
            int: quantized number that is divisible by divisor.
        """
        return int(round(number / divisor) * divisor)

    def adjust_width_group(self, widths, bottleneck_ratio, groups):
        """Adjusts the compatibility of widths and groups.

        Args:
            widths (list[int]): Width of each stage.
            bottleneck_ratio (list[float]): Bottleneck ratio of each stage.
            groups (list[int]): Number of groups in each stage.

        Returns:
            tuple(list): The adjusted widths and groups of each stage.
        """
        bottleneck_width = [
            int(w * b) for w, b in zip(widths, bottleneck_ratio)
        ]
        groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
        bottleneck_width = [
            self.quantize_float(w_bot, g)
            for w_bot, g in zip(bottleneck_width, groups)
        ]
        widths = [
            int(w_bot / b)
            for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
        ]
        return widths, groups

    def get_stages_from_blocks(self, widths):
        """Gets widths/stage_blocks of network at each stage.

        Args:
            widths (list[int]): Width in each stage.

        Returns:
            tuple(list): width and depth of each stage
        """
        width_diff = [
            width != width_prev
            for width, width_prev in zip(widths + [0], [0] + widths)
        ]
        stage_widths = [
            width for width, diff in zip(widths, width_diff[:-1]) if diff
        ]
        stage_blocks = np.diff([
            depth for depth, diff in zip(range(len(width_diff)), width_diff)
            if diff
        ]).tolist()
        return stage_widths, stage_blocks

    def forward(self, x):
        """Forward function."""
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)

        outs = []
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
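
The `generate_regnet` quantization can be reproduced in a few lines of NumPy, which makes the roles of `w0`, `wa` and `wm` concrete; a small sketch using the regnetx_3.2gf parameters from `arch_settings`:

import numpy as np

w0, wa, wm, depth, divisor = 88, 26.31, 2.25, 25, 8
widths_cont = np.arange(depth) * wa + w0                      # linear per-block widths
ks = np.round(np.log(widths_cont / w0) / np.log(wm))          # quantization exponents
widths = np.round(w0 * np.power(wm, ks) / divisor) * divisor  # snap to multiples of 8
print(sorted(set(widths.astype(int))))  # [88, 200, 448, 1000]: one width per stage

# adjust_width_group then snaps these to multiples of group_w=48,
# giving the (96, 192, 432, 1008) channels seen in the class docstring.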
 
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
_base_ = './ann_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py DELETED
@@ -1,36 +0,0 @@
_base_ = [
    '../_base_/models/ocrnet_hr18.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(decode_head=[
    dict(
        type='FCNHead',
        in_channels=[18, 36, 72, 144],
        channels=sum([18, 36, 72, 144]),
        in_index=(0, 1, 2, 3),
        input_transform='resize_concat',
        kernel_size=1,
        num_convs=1,
        concat_input=False,
        dropout_ratio=-1,
        num_classes=21,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    dict(
        type='OCRHead',
        in_channels=[18, 36, 72, 144],
        in_index=(0, 1, 2, 3),
        input_transform='resize_concat',
        channels=512,
        ocr_channels=256,
        dropout_ratio=-1,
        num_classes=21,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
])
 
spaces/Andy1621/uniformer_image_segmentation/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
_base_ = '../deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/cp949prober.py DELETED
@@ -1,49 +0,0 @@
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .chardistribution import EUCKRDistributionAnalysis
from .codingstatemachine import CodingStateMachine
from .mbcharsetprober import MultiByteCharSetProber
from .mbcssm import CP949_SM_MODEL


class CP949Prober(MultiByteCharSetProber):
    def __init__(self) -> None:
        super().__init__()
        self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
        # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
        #       not different.
        self.distribution_analyzer = EUCKRDistributionAnalysis()
        self.reset()

    @property
    def charset_name(self) -> str:
        return "CP949"

    @property
    def language(self) -> str:
        return "Korean"
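
For reference, the prober can be exercised like other chardet `CharSetProber` subclasses: feed it raw bytes and read off the confidence. A minimal sketch under that assumption (the sample text is arbitrary CP949-encoded Korean, and the `feed`/`get_confidence` calls are the inherited prober interface):

prober = CP949Prober()
data = '한국어 텍스트'.encode('cp949')  # arbitrary sample bytes
prober.feed(data)
print(prober.charset_name, prober.get_confidence())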
 
spaces/Bart92/RVC_HF/gui_v1.py DELETED
@@ -1,708 +0,0 @@
import os
import logging
import sys
from dotenv import load_dotenv

load_dotenv()

os.environ["OMP_NUM_THREADS"] = "4"
if sys.platform == "darwin":
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"

now_dir = os.getcwd()
sys.path.append(now_dir)
import multiprocessing

logger = logging.getLogger(__name__)


class Harvest(multiprocessing.Process):
    def __init__(self, inp_q, opt_q):
        multiprocessing.Process.__init__(self)
        self.inp_q = inp_q
        self.opt_q = opt_q

    def run(self):
        import numpy as np
        import pyworld

        while 1:
            idx, x, res_f0, n_cpu, ts = self.inp_q.get()
            f0, t = pyworld.harvest(
                x.astype(np.double),
                fs=16000,
                f0_ceil=1100,
                f0_floor=50,
                frame_period=10,
            )
            res_f0[idx] = f0
            if len(res_f0.keys()) >= n_cpu:
                self.opt_q.put(ts)


if __name__ == "__main__":
    import json
    import multiprocessing
    import re
    import threading
    import time
    import traceback
    from multiprocessing import Queue, cpu_count
    from queue import Empty

    import librosa
    from tools.torchgate import TorchGate
    import numpy as np
    import PySimpleGUI as sg
    import sounddevice as sd
    import torch
    import torch.nn.functional as F
    import torchaudio.transforms as tat

    import tools.rvc_for_realtime as rvc_for_realtime
    from i18n.i18n import I18nAuto

    i18n = I18nAuto()
    device = rvc_for_realtime.config.device
    # device = torch.device(
    #     "cuda"
    #     if torch.cuda.is_available()
    #     else ("mps" if torch.backends.mps.is_available() else "cpu")
    # )
    current_dir = os.getcwd()
    inp_q = Queue()
    opt_q = Queue()
    n_cpu = min(cpu_count(), 8)
    for _ in range(n_cpu):
        Harvest(inp_q, opt_q).start()

    class GUIConfig:
        def __init__(self) -> None:
            self.pth_path: str = ""
            self.index_path: str = ""
            self.pitch: int = 0
            self.samplerate: int = 40000
            self.block_time: float = 1.0  # s
            self.buffer_num: int = 1
            self.threhold: int = -60
            self.crossfade_time: float = 0.04
            self.extra_time: float = 2.0
            self.I_noise_reduce = False
            self.O_noise_reduce = False
            self.rms_mix_rate = 0.0
            self.index_rate = 0.3
            self.n_cpu = min(n_cpu, 6)
            self.f0method = "harvest"
            self.sg_input_device = ""
            self.sg_output_device = ""

    class GUI:
        def __init__(self) -> None:
            self.config = GUIConfig()
            self.flag_vc = False

            self.launcher()

        def load(self):
            input_devices, output_devices, _, _ = self.get_devices()
            try:
                with open("configs/config.json", "r") as j:
                    data = json.load(j)
                    data["pm"] = data["f0method"] == "pm"
                    data["harvest"] = data["f0method"] == "harvest"
                    data["crepe"] = data["f0method"] == "crepe"
                    data["rmvpe"] = data["f0method"] == "rmvpe"
            except:
                with open("configs/config.json", "w") as j:
                    data = {
                        "pth_path": " ",
                        "index_path": " ",
                        "sg_input_device": input_devices[sd.default.device[0]],
                        "sg_output_device": output_devices[sd.default.device[1]],
                        "threhold": "-60",
                        "pitch": "0",
                        "index_rate": "0",
                        "rms_mix_rate": "0",
                        "block_time": "0.25",
                        "crossfade_length": "0.04",
                        "extra_time": "2",
                        "f0method": "rmvpe",
                    }
                    data["pm"] = data["f0method"] == "pm"
                    data["harvest"] = data["f0method"] == "harvest"
                    data["crepe"] = data["f0method"] == "crepe"
                    data["rmvpe"] = data["f0method"] == "rmvpe"
            return data

        def launcher(self):
            data = self.load()
            sg.theme("LightBlue3")
            input_devices, output_devices, _, _ = self.get_devices()
            layout = [
                [
                    sg.Frame(
                        title=i18n("加载模型"),
                        layout=[
                            [
                                sg.Input(
                                    default_text=data.get("pth_path", ""),
                                    key="pth_path",
                                ),
                                sg.FileBrowse(
                                    i18n("选择.pth文件"),
                                    initial_folder=os.path.join(
                                        os.getcwd(), "assets/weights"
                                    ),
                                    file_types=((". pth"),),
                                ),
                            ],
                            [
                                sg.Input(
                                    default_text=data.get("index_path", ""),
                                    key="index_path",
                                ),
                                sg.FileBrowse(
                                    i18n("选择.index文件"),
                                    initial_folder=os.path.join(os.getcwd(), "logs"),
                                    file_types=((". index"),),
                                ),
                            ],
                        ],
                    )
                ],
                [
                    sg.Frame(
                        layout=[
                            [
                                sg.Text(i18n("输入设备")),
                                sg.Combo(
                                    input_devices,
                                    key="sg_input_device",
                                    default_value=data.get("sg_input_device", ""),
                                ),
                            ],
                            [
                                sg.Text(i18n("输出设备")),
                                sg.Combo(
                                    output_devices,
                                    key="sg_output_device",
                                    default_value=data.get("sg_output_device", ""),
                                ),
                            ],
                            [sg.Button(i18n("重载设备列表"), key="reload_devices")],
                        ],
                        title=i18n("音频设备(请使用同种类驱动)"),
                    )
                ],
                [
                    sg.Frame(
                        layout=[
                            [
                                sg.Text(i18n("响应阈值")),
                                sg.Slider(
                                    range=(-60, 0),
                                    key="threhold",
                                    resolution=1,
                                    orientation="h",
                                    default_value=data.get("threhold", "-60"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("音调设置")),
                                sg.Slider(
                                    range=(-24, 24),
                                    key="pitch",
                                    resolution=1,
                                    orientation="h",
                                    default_value=data.get("pitch", "0"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("Index Rate")),
                                sg.Slider(
                                    range=(0.0, 1.0),
                                    key="index_rate",
                                    resolution=0.01,
                                    orientation="h",
                                    default_value=data.get("index_rate", "0"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("响度因子")),
                                sg.Slider(
                                    range=(0.0, 1.0),
                                    key="rms_mix_rate",
                                    resolution=0.01,
                                    orientation="h",
                                    default_value=data.get("rms_mix_rate", "0"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("音高算法")),
                                sg.Radio(
                                    "pm",
                                    "f0method",
                                    key="pm",
                                    default=data.get("pm", "") == True,
                                    enable_events=True,
                                ),
                                sg.Radio(
                                    "harvest",
                                    "f0method",
                                    key="harvest",
                                    default=data.get("harvest", "") == True,
                                    enable_events=True,
                                ),
                                sg.Radio(
                                    "crepe",
                                    "f0method",
                                    key="crepe",
                                    default=data.get("crepe", "") == True,
                                    enable_events=True,
                                ),
                                sg.Radio(
                                    "rmvpe",
                                    "f0method",
                                    key="rmvpe",
                                    default=data.get("rmvpe", "") == True,
                                    enable_events=True,
                                ),
                            ],
                        ],
                        title=i18n("常规设置"),
                    ),
                    sg.Frame(
                        layout=[
                            [
                                sg.Text(i18n("采样长度")),
                                sg.Slider(
                                    range=(0.05, 2.4),
                                    key="block_time",
                                    resolution=0.01,
                                    orientation="h",
                                    default_value=data.get("block_time", "0.25"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("harvest进程数")),
                                sg.Slider(
                                    range=(1, n_cpu),
                                    key="n_cpu",
                                    resolution=1,
                                    orientation="h",
                                    default_value=data.get(
                                        "n_cpu", min(self.config.n_cpu, n_cpu)
                                    ),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("淡入淡出长度")),
                                sg.Slider(
                                    range=(0.01, 0.15),
                                    key="crossfade_length",
                                    resolution=0.01,
                                    orientation="h",
                                    default_value=data.get("crossfade_length", "0.04"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Text(i18n("额外推理时长")),
                                sg.Slider(
                                    range=(0.05, 5.00),
                                    key="extra_time",
                                    resolution=0.01,
                                    orientation="h",
                                    default_value=data.get("extra_time", "2.0"),
                                    enable_events=True,
                                ),
                            ],
                            [
                                sg.Checkbox(
                                    i18n("输入降噪"),
                                    key="I_noise_reduce",
                                    enable_events=True,
                                ),
                                sg.Checkbox(
                                    i18n("输出降噪"),
                                    key="O_noise_reduce",
                                    enable_events=True,
                                ),
                            ],
                        ],
                        title=i18n("性能设置"),
                    ),
                ],
                [
                    sg.Button(i18n("开始音频转换"), key="start_vc"),
                    sg.Button(i18n("停止音频转换"), key="stop_vc"),
                    sg.Text(i18n("推理时间(ms):")),
                    sg.Text("0", key="infer_time"),
                ],
            ]
            self.window = sg.Window("RVC - GUI", layout=layout, finalize=True)
            self.event_handler()

        def event_handler(self):
            while True:
                event, values = self.window.read()
                if event == sg.WINDOW_CLOSED:
                    self.flag_vc = False
                    exit()
                if event == "reload_devices":
                    prev_input = self.window["sg_input_device"].get()
                    prev_output = self.window["sg_output_device"].get()
                    input_devices, output_devices, _, _ = self.get_devices(update=True)
                    if prev_input not in input_devices:
                        self.config.sg_input_device = input_devices[0]
                    else:
                        self.config.sg_input_device = prev_input
                    self.window["sg_input_device"].Update(values=input_devices)
                    self.window["sg_input_device"].Update(
                        value=self.config.sg_input_device
                    )
                    if prev_output not in output_devices:
                        self.config.sg_output_device = output_devices[0]
                    else:
                        self.config.sg_output_device = prev_output
                    self.window["sg_output_device"].Update(values=output_devices)
                    self.window["sg_output_device"].Update(
                        value=self.config.sg_output_device
                    )
                if event == "start_vc" and self.flag_vc == False:
                    if self.set_values(values) == True:
                        logger.info("Use CUDA: %s", torch.cuda.is_available())
                        self.start_vc()
                        settings = {
                            "pth_path": values["pth_path"],
                            "index_path": values["index_path"],
                            "sg_input_device": values["sg_input_device"],
                            "sg_output_device": values["sg_output_device"],
                            "threhold": values["threhold"],
                            "pitch": values["pitch"],
                            "rms_mix_rate": values["rms_mix_rate"],
                            "index_rate": values["index_rate"],
                            "block_time": values["block_time"],
                            "crossfade_length": values["crossfade_length"],
                            "extra_time": values["extra_time"],
                            "n_cpu": values["n_cpu"],
                            "f0method": ["pm", "harvest", "crepe", "rmvpe"][
                                [
                                    values["pm"],
                                    values["harvest"],
                                    values["crepe"],
                                    values["rmvpe"],
                                ].index(True)
                            ],
                        }
                        with open("configs/config.json", "w") as j:
                            json.dump(settings, j)
                if event == "stop_vc" and self.flag_vc == True:
                    self.flag_vc = False

                # Parameter hot update
                if event == "threhold":
                    self.config.threhold = values["threhold"]
                elif event == "pitch":
                    self.config.pitch = values["pitch"]
                    if hasattr(self, "rvc"):
                        self.rvc.change_key(values["pitch"])
                elif event == "index_rate":
                    self.config.index_rate = values["index_rate"]
                    if hasattr(self, "rvc"):
                        self.rvc.change_index_rate(values["index_rate"])
                elif event == "rms_mix_rate":
                    self.config.rms_mix_rate = values["rms_mix_rate"]
                elif event in ["pm", "harvest", "crepe", "rmvpe"]:
                    self.config.f0method = event
                elif event == "I_noise_reduce":
                    self.config.I_noise_reduce = values["I_noise_reduce"]
                elif event == "O_noise_reduce":
                    self.config.O_noise_reduce = values["O_noise_reduce"]
                elif event != "start_vc" and self.flag_vc == True:
                    # Other parameters do not support hot update
                    self.flag_vc = False

        def set_values(self, values):
            if len(values["pth_path"].strip()) == 0:
                sg.popup(i18n("请选择pth文件"))
                return False
            if len(values["index_path"].strip()) == 0:
                sg.popup(i18n("请选择index文件"))
                return False
            pattern = re.compile("[^\x00-\x7F]+")
            if pattern.findall(values["pth_path"]):
                sg.popup(i18n("pth文件路径不可包含中文"))
                return False
            if pattern.findall(values["index_path"]):
                sg.popup(i18n("index文件路径不可包含中文"))
                return False
            self.set_devices(values["sg_input_device"], values["sg_output_device"])
            self.config.pth_path = values["pth_path"]
            self.config.index_path = values["index_path"]
            self.config.threhold = values["threhold"]
            self.config.pitch = values["pitch"]
            self.config.block_time = values["block_time"]
            self.config.crossfade_time = values["crossfade_length"]
            self.config.extra_time = values["extra_time"]
            self.config.I_noise_reduce = values["I_noise_reduce"]
            self.config.O_noise_reduce = values["O_noise_reduce"]
            self.config.rms_mix_rate = values["rms_mix_rate"]
            self.config.index_rate = values["index_rate"]
            self.config.n_cpu = values["n_cpu"]
            self.config.f0method = ["pm", "harvest", "crepe", "rmvpe"][
                [
                    values["pm"],
                    values["harvest"],
                    values["crepe"],
                    values["rmvpe"],
                ].index(True)
            ]
            return True

        def start_vc(self):
            torch.cuda.empty_cache()
            self.flag_vc = True
            self.rvc = rvc_for_realtime.RVC(
                self.config.pitch,
                self.config.pth_path,
                self.config.index_path,
                self.config.index_rate,
                self.config.n_cpu,
                inp_q,
                opt_q,
                device,
                self.rvc if hasattr(self, "rvc") else None,
            )
            self.config.samplerate = self.rvc.tgt_sr
            self.zc = self.rvc.tgt_sr // 100
            self.block_frame = int(np.round(self.config.block_time * self.config.samplerate / self.zc)) * self.zc
            self.block_frame_16k = 160 * self.block_frame // self.zc
            self.crossfade_frame = int(np.round(self.config.crossfade_time * self.config.samplerate / self.zc)) * self.zc
            self.sola_search_frame = self.zc
            self.extra_frame = int(np.round(self.config.extra_time * self.config.samplerate / self.zc)) * self.zc
            self.input_wav: torch.Tensor = torch.zeros(
                self.extra_frame
                + self.crossfade_frame
                + self.sola_search_frame
                + self.block_frame,
                device=device,
                dtype=torch.float32,
            )
            self.input_wav_res: torch.Tensor = torch.zeros(
                160 * self.input_wav.shape[0] // self.zc, device=device, dtype=torch.float32
            )
            self.pitch: np.ndarray = np.zeros(
                self.input_wav.shape[0] // self.zc,
                dtype="int32",
            )
            self.pitchf: np.ndarray = np.zeros(
                self.input_wav.shape[0] // self.zc,
                dtype="float64",
            )
            self.sola_buffer: torch.Tensor = torch.zeros(
                self.crossfade_frame, device=device, dtype=torch.float32
            )
            self.nr_buffer: torch.Tensor = self.sola_buffer.clone()
            self.output_buffer: torch.Tensor = self.input_wav.clone()
            self.res_buffer: torch.Tensor = torch.zeros(2 * self.zc, device=device, dtype=torch.float32)
            self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0]
            self.fade_in_window: torch.Tensor = (
                torch.sin(
                    0.5
                    * np.pi
                    * torch.linspace(
                        0.0,
                        1.0,
                        steps=self.crossfade_frame,
                        device=device,
                        dtype=torch.float32,
                    )
                )
                ** 2
            )
            self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
            self.resampler = tat.Resample(
                orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
            ).to(device)
            self.tg = TorchGate(sr=self.config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9).to(device)
            thread_vc = threading.Thread(target=self.soundinput)
            thread_vc.start()

        def soundinput(self):
            """
            Receive audio input from the stream until conversion is stopped.
            """
            channels = 1 if sys.platform == "darwin" else 2
            with sd.Stream(
                channels=channels,
                callback=self.audio_callback,
                blocksize=self.block_frame,
                samplerate=self.config.samplerate,
                dtype="float32",
            ):
                while self.flag_vc:
                    time.sleep(self.config.block_time)
                    logger.debug("Audio block passed.")
            logger.debug("ENDing VC")

        def audio_callback(
            self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
        ):
            """
            Audio processing callback: converts one input block to the target voice.
            """
            start_time = time.perf_counter()
            indata = librosa.to_mono(indata.T)
            if self.config.threhold > -60:
                rms = librosa.feature.rms(
                    y=indata, frame_length=4 * self.zc, hop_length=self.zc
                )
                db_threhold = (
                    librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
                )
                for i in range(db_threhold.shape[0]):
                    if db_threhold[i]:
                        indata[i * self.zc : (i + 1) * self.zc] = 0
            self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone()
            self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(device)
            self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone()
            # input noise reduction and resampling
            if self.config.I_noise_reduce:
                input_wav = self.input_wav[-self.crossfade_frame - self.block_frame - 2 * self.zc :]
                input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2 * self.zc :]
578
- input_wav[: self.crossfade_frame] *= self.fade_in_window
579
- input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window
580
- self.nr_buffer[:] = input_wav[-self.crossfade_frame: ]
581
- input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame]))
582
- self.res_buffer[:] = input_wav[-2*self.zc: ]
583
- self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(input_wav)[160: ]
584
- else:
585
- self.input_wav_res[-self.block_frame_16k-160: ] = self.resampler(self.input_wav[-self.block_frame-2*self.zc: ])[160: ]
586
- # infer
587
- f0_extractor_frame = self.block_frame_16k + 800
588
- if self.config.f0method == 'rmvpe':
589
- f0_extractor_frame = 5120 * ((f0_extractor_frame - 1) // 5120 + 1)
590
- infer_wav = self.rvc.infer(
591
- self.input_wav_res,
592
- self.input_wav_res[-f0_extractor_frame :].cpu().numpy(),
593
- self.block_frame_16k,
594
- self.valid_rate,
595
- self.pitch,
596
- self.pitchf,
597
- self.config.f0method,
598
- )
599
- infer_wav = infer_wav[
600
- -self.crossfade_frame - self.sola_search_frame - self.block_frame :
601
- ]
602
- # output noise reduction
603
- if self.config.O_noise_reduce:
604
- self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone()
605
- self.output_buffer[-self.block_frame: ] = infer_wav[-self.block_frame:]
606
- infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0)
607
- # volume envelop mixing
608
- if self.config.rms_mix_rate < 1:
609
- rms1 = librosa.feature.rms(
610
- y=self.input_wav_res[-160*infer_wav.shape[0]//self.zc :].cpu().numpy(),
611
- frame_length=640,
612
- hop_length=160,
613
- )
614
- rms1 = torch.from_numpy(rms1).to(device)
615
- rms1 = F.interpolate(
616
- rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True,
617
- )[0,0,:-1]
618
- rms2 = librosa.feature.rms(
619
- y=infer_wav[:].cpu().numpy(), frame_length=4*self.zc, hop_length=self.zc
620
- )
621
- rms2 = torch.from_numpy(rms2).to(device)
622
- rms2 = F.interpolate(
623
- rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear",align_corners=True,
624
- )[0,0,:-1]
625
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
626
- infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.config.rms_mix_rate))
627
- # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
628
- conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
629
- cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
630
- cor_den = torch.sqrt(
631
- F.conv1d(conv_input ** 2, torch.ones(1, 1, self.crossfade_frame, device=device)) + 1e-8)
632
- if sys.platform == "darwin":
633
- _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
634
- sola_offset = sola_offset.item()
635
- else:
636
- sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
637
- logger.debug("sola_offset = %d", int(sola_offset))
638
- infer_wav = infer_wav[sola_offset: sola_offset + self.block_frame + self.crossfade_frame]
639
- infer_wav[: self.crossfade_frame] *= self.fade_in_window
640
- infer_wav[: self.crossfade_frame] += self.sola_buffer *self.fade_out_window
641
- self.sola_buffer[:] = infer_wav[-self.crossfade_frame:]
642
- if sys.platform == "darwin":
643
- outdata[:] = infer_wav[:-self.crossfade_frame].cpu().numpy()[:, np.newaxis]
644
- else:
645
- outdata[:] = infer_wav[:-self.crossfade_frame].repeat(2, 1).t().cpu().numpy()
646
- total_time = time.perf_counter() - start_time
647
- self.window["infer_time"].update(int(total_time * 1000))
648
- logger.info("Infer time: %.2f", total_time)
649
-
650
- def get_devices(self, update: bool = True):
651
- """获取设备列表"""
652
- if update:
653
- sd._terminate()
654
- sd._initialize()
655
- devices = sd.query_devices()
656
- hostapis = sd.query_hostapis()
657
- for hostapi in hostapis:
658
- for device_idx in hostapi["devices"]:
659
- devices[device_idx]["hostapi_name"] = hostapi["name"]
660
- input_devices = [
661
- f"{d['name']} ({d['hostapi_name']})"
662
- for d in devices
663
- if d["max_input_channels"] > 0
664
- ]
665
- output_devices = [
666
- f"{d['name']} ({d['hostapi_name']})"
667
- for d in devices
668
- if d["max_output_channels"] > 0
669
- ]
670
- input_devices_indices = [
671
- d["index"] if "index" in d else d["name"]
672
- for d in devices
673
- if d["max_input_channels"] > 0
674
- ]
675
- output_devices_indices = [
676
- d["index"] if "index" in d else d["name"]
677
- for d in devices
678
- if d["max_output_channels"] > 0
679
- ]
680
- return (
681
- input_devices,
682
- output_devices,
683
- input_devices_indices,
684
- output_devices_indices,
685
- )
686
-
687
- def set_devices(self, input_device, output_device):
688
- """设置输出设备"""
689
- (
690
- input_devices,
691
- output_devices,
692
- input_device_indices,
693
- output_device_indices,
694
- ) = self.get_devices()
695
- sd.default.device[0] = input_device_indices[
696
- input_devices.index(input_device)
697
- ]
698
- sd.default.device[1] = output_device_indices[
699
- output_devices.index(output_device)
700
- ]
701
- logger.info(
702
- "Input device: %s:%s", str(sd.default.device[0]), input_device
703
- )
704
- logger.info(
705
- "Output device: %s:%s", str(sd.default.device[1]), output_device
706
- )
707
-
708
- gui = GUI()
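
The SOLA step in audio_callback above aligns each newly inferred chunk against the tail of the previous output by maximizing a normalized cross-correlation, then crossfades the overlap with the squared-sine window built in start_vc. A minimal standalone sketch of that alignment in pure PyTorch (all sizes and names here are illustrative, not taken from the app):

    import torch
    import torch.nn.functional as F

    def sola_offset(infer_wav, sola_buffer, crossfade, search):
        # Correlate the head of the new chunk with the previous tail at
        # every candidate offset inside the search window.
        head = infer_wav[None, None, : crossfade + search]
        nom = F.conv1d(head, sola_buffer[None, None, :])
        den = torch.sqrt(F.conv1d(head ** 2, torch.ones(1, 1, crossfade)) + 1e-8)
        return int(torch.argmax(nom[0, 0] / den[0, 0]))

    crossfade, search, block = 480, 160, 4000
    wav, buf = torch.randn(block + crossfade + search), torch.randn(crossfade)
    off = sola_offset(wav, buf, crossfade, search)
    # Equal-power crossfade of the aligned chunk into the previous tail.
    fade_in = torch.sin(0.5 * torch.pi * torch.linspace(0, 1, crossfade)) ** 2
    chunk = wav[off : off + block + crossfade].clone()
    chunk[:crossfade] = chunk[:crossfade] * fade_in + buf * (1 - fade_in)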
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/discriminator/model.py DELETED
@@ -1,67 +0,0 @@
- import functools
- import torch.nn as nn
-
-
- from taming.modules.util import ActNorm
-
-
- def weights_init(m):
-     classname = m.__class__.__name__
-     if classname.find('Conv') != -1:
-         nn.init.normal_(m.weight.data, 0.0, 0.02)
-     elif classname.find('BatchNorm') != -1:
-         nn.init.normal_(m.weight.data, 1.0, 0.02)
-         nn.init.constant_(m.bias.data, 0)
-
-
- class NLayerDiscriminator(nn.Module):
-     """Defines a PatchGAN discriminator as in Pix2Pix
-     --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
-     """
-     def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
-         """Construct a PatchGAN discriminator
-         Parameters:
-             input_nc (int)  -- the number of channels in input images
-             ndf (int)       -- the number of filters in the last conv layer
-             n_layers (int)  -- the number of conv layers in the discriminator
-             norm_layer      -- normalization layer
-         """
-         super(NLayerDiscriminator, self).__init__()
-         if not use_actnorm:
-             norm_layer = nn.BatchNorm2d
-         else:
-             norm_layer = ActNorm
-         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
-             use_bias = norm_layer.func != nn.BatchNorm2d
-         else:
-             use_bias = norm_layer != nn.BatchNorm2d
-
-         kw = 4
-         padw = 1
-         sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
-         nf_mult = 1
-         nf_mult_prev = 1
-         for n in range(1, n_layers):  # gradually increase the number of filters
-             nf_mult_prev = nf_mult
-             nf_mult = min(2 ** n, 8)
-             sequence += [
-                 nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
-                 norm_layer(ndf * nf_mult),
-                 nn.LeakyReLU(0.2, True)
-             ]
-
-         nf_mult_prev = nf_mult
-         nf_mult = min(2 ** n_layers, 8)
-         sequence += [
-             nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
-             norm_layer(ndf * nf_mult),
-             nn.LeakyReLU(0.2, True)
-         ]
-
-         sequence += [
-             nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
-         self.main = nn.Sequential(*sequence)
-
-     def forward(self, input):
-         """Standard forward."""
-         return self.main(input)
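
The class above is the standard pix2pix PatchGAN: each output logit scores one receptive-field patch of the input rather than the whole image. A quick usage sketch (shapes are illustrative):

    import torch

    disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    disc.apply(weights_init)                 # DCGAN-style init defined above
    logits = disc(torch.randn(1, 3, 256, 256))
    print(logits.shape)                      # torch.Size([1, 1, 30, 30]): one logit per patch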
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_compat.py DELETED
@@ -1,55 +0,0 @@
- import importlib.metadata
- from typing import Any, Optional, Protocol, cast
-
-
- class BadMetadata(ValueError):
-     def __init__(self, dist: importlib.metadata.Distribution, *, reason: str) -> None:
-         self.dist = dist
-         self.reason = reason
-
-     def __str__(self) -> str:
-         return f"Bad metadata in {self.dist} ({self.reason})"
-
-
- class BasePath(Protocol):
-     """A protocol that various path objects conform.
-
-     This exists because importlib.metadata uses both ``pathlib.Path`` and
-     ``zipfile.Path``, and we need a common base for type hints (Union does not
-     work well since ``zipfile.Path`` is too new for our linter setup).
-
-     This does not mean to be exhaustive, but only contains things that present
-     in both classes *that we need*.
-     """
-
-     @property
-     def name(self) -> str:
-         raise NotImplementedError()
-
-     @property
-     def parent(self) -> "BasePath":
-         raise NotImplementedError()
-
-
- def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]:
-     """Find the path to the distribution's metadata directory.
-
-     HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
-     all distributions exist on disk, so importlib.metadata is correct to not
-     expose the attribute as public. But pip's code base is old and not as clean,
-     so we do this to avoid having to rewrite too many things. Hopefully we can
-     eliminate this some day.
-     """
-     return getattr(d, "_path", None)
-
-
- def get_dist_name(dist: importlib.metadata.Distribution) -> str:
-     """Get the distribution's project name.
-
-     The ``name`` attribute is only available in Python 3.10 or later. We are
-     targeting exactly that, but Mypy does not know this.
-     """
-     name = cast(Any, dist).name
-     if not isinstance(name, str):
-         raise BadMetadata(dist, reason="invalid metadata entry 'name'")
-     return name
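
get_dist_name and get_info_location above paper over importlib.metadata's rough edges. A small usage sketch, assuming the module is importable as shown (output is environment-dependent):

    import importlib.metadata

    for dist in importlib.metadata.distributions():
        try:
            name = get_dist_name(dist)
        except BadMetadata as exc:
            print("skipping:", exc)
            continue
        info = get_info_location(dist)
        print(name, "->", info.name if info is not None else "<not on disk>")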
 
spaces/Boadiwaa/Recipes/openai/upload_progress.py DELETED
@@ -1,52 +0,0 @@
- import io
-
-
- class CancelledError(Exception):
-     def __init__(self, msg):
-         self.msg = msg
-         Exception.__init__(self, msg)
-
-     def __str__(self):
-         return self.msg
-
-     __repr__ = __str__
-
-
- class BufferReader(io.BytesIO):
-     def __init__(self, buf=b"", desc=None):
-         self._len = len(buf)
-         io.BytesIO.__init__(self, buf)
-         self._progress = 0
-         self._callback = progress(len(buf), desc=desc)
-
-     def __len__(self):
-         return self._len
-
-     def read(self, n=-1):
-         chunk = io.BytesIO.read(self, n)
-         self._progress += len(chunk)
-         if self._callback:
-             try:
-                 self._callback(self._progress)
-             except Exception as e:  # catches exception from the callback
-                 raise CancelledError("The upload was cancelled: {}".format(e))
-         return chunk
-
-
- def progress(total, desc):
-     import tqdm  # type: ignore
-
-     meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc)
-
-     def incr(progress):
-         meter.n = progress
-         if progress == total:
-             meter.close()
-         else:
-             meter.refresh()
-
-     return incr
-
-
- def MB(i):
-     return int(i // 1024 ** 2)
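
BufferReader turns any in-memory payload into a file-like object whose read() calls drive a tqdm progress bar, and raising from the callback cancels the upload. A hedged usage sketch:

    payload = b"\x00" * (8 * 1024 * 1024)
    reader = BufferReader(payload, desc="uploading %dMB" % MB(len(payload)))
    while reader.read(1024 * 1024):
        pass  # an HTTP client would consume the stream here in chunks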
 
spaces/Brasd99/TTS-Voice-Conversion/app.py DELETED
@@ -1,72 +0,0 @@
- from TTS.api import TTS
- from bs4 import BeautifulSoup
- import requests
- import streamlit as st
- import tempfile
- import os
- import json
- import datetime
-
- with open('config.json', 'r') as f:
-     config = json.load(f)
-
- APP_NAME = config['APP_NAME']
- APP_LOGO = config['APP_LOGO']
- APP_DESCRIPTION = config['APP_DESCRIPTION']
-
- def contains_only_ascii(input_string):
-     return all(ord(char) < 128 for char in input_string)
-
- def create_temp_file(input_wav):
-     temp_file = tempfile.NamedTemporaryFile(delete=False)
-     temp_file.write(input_wav.read())
-     return temp_file
-
- def remove_temp_file(temp_file):
-     temp_file.close()
-     os.remove(temp_file.name)
-
- def update_progress(percent, text):
-     progress_bar.progress(percent)
-     status_text.text(text)
-
- st.set_page_config(page_title=APP_NAME)
- st.title(APP_NAME)
- st.image(APP_LOGO, use_column_width=True)
- st.markdown(APP_DESCRIPTION)
-
- input_wav = st.file_uploader("Upload a WAV file with your voice", type=["wav"])
- clone_wav = st.file_uploader("Upload a WAV file with voice to clone", type=["wav"])
-
- if input_wav and clone_wav:
-     progress_bar = st.progress(0)
-     status_text = st.empty()
-
-     current_datetime = datetime.datetime.now()
-     formatted_datetime = current_datetime.strftime("%Y-%m-%d_%H%M%S")
-     output_filename = f"recording_{formatted_datetime}.wav"
-
-     temp_input_file = create_temp_file(input_wav)
-     temp_clone_file = create_temp_file(clone_wav)
-
-     update_progress(0, 'Loading TTS model...')
-     api = TTS("voice_conversion_models/multilingual/vctk/freevc24")
-
-     update_progress(50, 'Generating audio...')
-     api.voice_conversion_to_file(
-         source_wav=temp_input_file.name,
-         target_wav=temp_clone_file.name,
-         file_path=output_filename
-     )
-
-     remove_temp_file(temp_input_file)
-     remove_temp_file(temp_clone_file)
-
-     audio_file = open(output_filename, 'rb')
-     audio_bytes = audio_file.read()
-
-     update_progress(100, 'Audio generated successfully!')
-
-     st.audio(audio_bytes, format='audio/wav')
-
-     st.download_button('Download WAV', data=audio_bytes, file_name='output.wav')
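
The Space above drives Coqui TTS's FreeVC voice-conversion model. The same call works outside Streamlit; a minimal sketch (the model name is taken from the code above, the file paths are illustrative):

    from TTS.api import TTS

    tts = TTS("voice_conversion_models/multilingual/vctk/freevc24")
    tts.voice_conversion_to_file(
        source_wav="my_voice.wav",        # speech providing the content
        target_wav="voice_to_clone.wav",  # speaker identity to imitate
        file_path="converted.wav",
    )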
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/data/datasets/__init__.py DELETED
@@ -1,9 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- from .cityscapes import load_cityscapes_instances
- from .coco import load_coco_json, load_sem_seg
- from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta
- from .register_coco import register_coco_instances, register_coco_panoptic_separated
- from . import builtin  # ensure the builtin datasets are registered
-
-
- __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa/models/mcan/adapter.py DELETED
@@ -1,90 +0,0 @@
- # --------------------------------------------------------
- # OpenVQA
- # Written by Yuhao Cui https://github.com/cuiyuhao1996
- # --------------------------------------------------------
-
- import torch.nn as nn
- import torch
- from openvqa.core.base_dataset import BaseAdapter
- from openvqa.utils.make_mask import make_mask
-
-
- class Adapter(BaseAdapter):
-     def __init__(self, __C):
-         super(Adapter, self).__init__(__C)
-         self.__C = __C
-
-     def bbox_proc(self, bbox):
-         area = (bbox[:, :, 2] - bbox[:, :, 0]) * (bbox[:, :, 3] - bbox[:, :, 1])
-         return torch.cat((bbox, area.unsqueeze(2)), -1)
-
-     def vqa_init(self, __C):
-         imgfeat_linear_size = __C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'][1]
-         if __C.USE_BBOX_FEAT:
-             self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE)
-             imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE
-         self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE)
-
-     def gqa_init(self, __C):
-         imgfeat_linear_size = __C.FEAT_SIZE['gqa']['FRCN_FEAT_SIZE'][1]
-         if __C.USE_BBOX_FEAT:
-             self.bbox_linear = nn.Linear(5, __C.BBOXFEAT_EMB_SIZE)
-             imgfeat_linear_size += __C.BBOXFEAT_EMB_SIZE
-         self.frcn_linear = nn.Linear(imgfeat_linear_size, __C.HIDDEN_SIZE)
-
-         if __C.USE_AUX_FEAT:
-             self.grid_linear = nn.Linear(__C.FEAT_SIZE['gqa']['GRID_FEAT_SIZE'][1], __C.HIDDEN_SIZE)
-
-     def clevr_init(self, __C):
-         self.grid_linear = nn.Linear(__C.FEAT_SIZE['clevr']['GRID_FEAT_SIZE'][1], __C.HIDDEN_SIZE)
-
-     def vqa_forward(self, feat_dict):
-         frcn_feat = feat_dict['FRCN_FEAT']
-         bbox_feat = feat_dict['BBOX_FEAT']
-
-         img_feat_mask = make_mask(frcn_feat)
-
-         if self.__C.USE_BBOX_FEAT:
-             bbox_feat = self.bbox_proc(bbox_feat)
-             bbox_feat = self.bbox_linear(bbox_feat)
-             frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1)
-         img_feat = self.frcn_linear(frcn_feat)
-
-         return img_feat, img_feat_mask
-
-     def gqa_forward(self, feat_dict):
-         frcn_feat = feat_dict['FRCN_FEAT']
-         bbox_feat = feat_dict['BBOX_FEAT']
-         grid_feat = feat_dict['GRID_FEAT']
-
-         img_feat_mask = make_mask(frcn_feat)
-
-         if self.__C.USE_BBOX_FEAT:
-             bbox_feat = self.bbox_proc(bbox_feat)
-             bbox_feat = self.bbox_linear(bbox_feat)
-             frcn_feat = torch.cat((frcn_feat, bbox_feat), dim=-1)
-         img_feat = self.frcn_linear(frcn_feat)
-
-         if self.__C.USE_AUX_FEAT:
-             grid_feat_mask = make_mask(grid_feat)
-             img_feat_mask = torch.cat((img_feat_mask, grid_feat_mask), dim=-1)
-             grid_feat = self.grid_linear(grid_feat)
-             img_feat = torch.cat((img_feat, grid_feat), dim=1)
-
-         return img_feat, img_feat_mask
-
-     def clevr_forward(self, feat_dict):
-         grid_feat = feat_dict['GRID_FEAT']
-
-         img_feat_mask = make_mask(grid_feat)
-         img_feat = self.grid_linear(grid_feat)
-
-         return img_feat, img_feat_mask
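
bbox_proc above augments each (x1, y1, x2, y2) box with its area as a fifth coordinate before the 5-to-BBOXFEAT_EMB_SIZE linear projection. A quick check of that transform in isolation:

    import torch

    bbox = torch.tensor([[[0.0, 0.0, 2.0, 3.0]]])    # (batch=1, boxes=1, 4)
    area = (bbox[:, :, 2] - bbox[:, :, 0]) * (bbox[:, :, 3] - bbox[:, :, 1])
    print(torch.cat((bbox, area.unsqueeze(2)), -1))  # tensor([[[0., 0., 2., 3., 6.]]])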
 
spaces/CarperAI/StableVicuna/app.py DELETED
@@ -1,138 +0,0 @@
- import os
- import gc
- from string import Template
- from threading import Thread
-
- import torch
- import gradio as gr
- from transformers import AutoTokenizer, AutoModelForCausalLM, BatchEncoding, TextIteratorStreamer
-
-
- auth_token = os.environ.get("HUGGINGFACE_TOKEN")
- tokenizer = AutoTokenizer.from_pretrained(
-     "CarperAI/stable-vicuna-13b-fp16",
-     use_auth_token=auth_token if auth_token else True,
- )
- model = AutoModelForCausalLM.from_pretrained(
-     "CarperAI/stable-vicuna-13b-fp16",
-     torch_dtype=torch.float16,
-     low_cpu_mem_usage=True,
-     device_map="auto",
-     use_auth_token=auth_token if auth_token else True,
- )
- model.eval()
-
-
- max_context_length = model.config.max_position_embeddings
- max_new_tokens = 768
-
-
- prompt_template = Template("""\
- ### Human: $human
- ### Assistant: $bot\
- """)
-
-
- system_prompt = "### Assistant: I am StableVicuna, a large language model created by CarperAI. I am here to chat!"
- system_prompt_tokens = tokenizer([f"{system_prompt}\n\n"], return_tensors="pt")
- max_sys_tokens = system_prompt_tokens['input_ids'].size(-1)
-
-
- def bot(history):
-     history = history or []
-
-     # Inject prompt formatting into the history
-     prompt_history = []
-     for human, bot in history:
-         if bot is not None:
-             bot = bot.replace("<br>", "\n")
-             bot = bot.rstrip()
-         prompt_history.append(
-             prompt_template.substitute(
-                 human=human, bot=bot if bot is not None else "")
-         )
-
-     msg_tokens = tokenizer(
-         "\n\n".join(prompt_history).strip(),
-         return_tensors="pt",
-         add_special_tokens=False  # Use <BOS> from the system prompt
-     )
-
-     # Take only the most recent context up to the max context length and prepend the
-     # system prompt with the messages
-     max_tokens = -max_context_length + max_new_tokens + max_sys_tokens
-     inputs = BatchEncoding({
-         k: torch.concat([system_prompt_tokens[k], msg_tokens[k][:, max_tokens:]], dim=-1)
-         for k in msg_tokens
-     }).to('cuda')
-     # Remove `token_type_ids` b/c it's not yet supported for LLaMA `transformers` models
-     if inputs.get("token_type_ids", None) is not None:
-         inputs.pop("token_type_ids")
-
-     streamer = TextIteratorStreamer(
-         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
-     )
-     generate_kwargs = dict(
-         inputs,
-         streamer=streamer,
-         max_new_tokens=max_new_tokens,
-         do_sample=True,
-         top_p=1.0,
-         temperature=1.0,
-     )
-     thread = Thread(target=model.generate, kwargs=generate_kwargs)
-     thread.start()
-
-     partial_text = ""
-     for new_text in streamer:
-         # Process out the prompt separator
-         new_text = new_text.replace("<br>", "\n")
-         if "###" in new_text:
-             new_text = new_text.split("###")[0]
-             partial_text += new_text.strip()
-             history[-1][1] = partial_text
-             break
-         else:
-             # Filter empty trailing new lines
-             if new_text == "\n":
-                 new_text = new_text.strip()
-             partial_text += new_text
-             history[-1][1] = partial_text
-             yield history
-     return partial_text
-
-
- def user(user_message, history):
-     return "", history + [[user_message, None]]
-
-
- with gr.Blocks() as demo:
-     gr.Markdown("# StableVicuna by CarperAI")
-     gr.HTML("<a href='https://huggingface.co/CarperAI/stable-vicuna-13b-delta'><code>CarperAI/stable-vicuna-13b-delta</code></a>")
-     gr.HTML('''<center><a href="https://huggingface.co/spaces/CarperAI/StableVicuna?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space to skip the queue and run in a private space</center>''')
-
-     chatbot = gr.Chatbot([], elem_id="chatbot").style(height=500)
-     state = gr.State([])
-     with gr.Row():
-         with gr.Column():
-             msg = gr.Textbox(
-                 label="Send a message",
-                 placeholder="Send a message",
-                 show_label=False
-             ).style(container=False)
-         with gr.Column():
-             with gr.Row():
-                 submit = gr.Button("Send")
-                 stop = gr.Button("Stop")
-                 clear = gr.Button("Clear History")
-
-     submit_event = msg.submit(user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
-         fn=bot, inputs=[chatbot], outputs=[chatbot], queue=True)
-     submit_click_event = submit.click(user, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False).then(
-         fn=bot, inputs=[chatbot], outputs=[chatbot], queue=True)
-
-     stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_event, submit_click_event], queue=False)
-     clear.click(lambda: None, None, [chatbot], queue=True)
-
- demo.queue(max_size=32)
- demo.launch()
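
The chat history is flattened into "### Human: ... / ### Assistant: ..." turns via prompt_template, and generation stops when the model emits the next "###" separator. The formatting step in isolation:

    from string import Template

    t = Template("### Human: $human\n### Assistant: $bot")
    print(t.substitute(human="What is RLHF?", bot=""))
    # ### Human: What is RLHF?
    # ### Assistant: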
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/__init__.py DELETED
File without changes
spaces/ChandraMohanNayal/AutoGPT/autogpt/llm_utils.py DELETED
@@ -1,172 +0,0 @@
- from __future__ import annotations
-
- import time
- from ast import List
-
- import openai
- from colorama import Fore, Style
- from openai.error import APIError, RateLimitError
-
- from autogpt.config import Config
- from autogpt.logs import logger
-
- CFG = Config()
-
- openai.api_key = CFG.openai_api_key
-
-
- def call_ai_function(
-     function: str, args: list, description: str, model: str | None = None
- ) -> str:
-     """Call an AI function
-
-     This is a magic function that can do anything with no-code. See
-     https://github.com/Torantulino/AI-Functions for more info.
-
-     Args:
-         function (str): The function to call
-         args (list): The arguments to pass to the function
-         description (str): The description of the function
-         model (str, optional): The model to use. Defaults to None.
-
-     Returns:
-         str: The response from the function
-     """
-     if model is None:
-         model = CFG.smart_llm_model
-     # For each arg, if any are None, convert to "None":
-     args = [str(arg) if arg is not None else "None" for arg in args]
-     # parse args to comma separated string
-     args = ", ".join(args)
-     messages = [
-         {
-             "role": "system",
-             "content": f"You are now the following python function: ```# {description}"
-             f"\n{function}```\n\nOnly respond with your `return` value.",
-         },
-         {"role": "user", "content": args},
-     ]
-
-     return create_chat_completion(model=model, messages=messages, temperature=0)
-
-
- # Overly simple abstraction until we create something better
- # simple retry mechanism when getting a rate error or a bad gateway
- def create_chat_completion(
-     messages: list,  # type: ignore
-     model: str | None = None,
-     temperature: float = CFG.temperature,
-     max_tokens: int | None = None,
- ) -> str:
-     """Create a chat completion using the OpenAI API
-
-     Args:
-         messages (list[dict[str, str]]): The messages to send to the chat completion
-         model (str, optional): The model to use. Defaults to None.
-         temperature (float, optional): The temperature to use. Defaults to 0.9.
-         max_tokens (int, optional): The max tokens to use. Defaults to None.
-
-     Returns:
-         str: The response from the chat completion
-     """
-     response = None
-     num_retries = 10
-     warned_user = False
-     if CFG.debug_mode:
-         print(
-             Fore.GREEN
-             + f"Creating chat completion with model {model}, temperature {temperature},"
-             f" max_tokens {max_tokens}" + Fore.RESET
-         )
-     for attempt in range(num_retries):
-         backoff = 2 ** (attempt + 2)
-         try:
-             if CFG.use_azure:
-                 response = openai.ChatCompletion.create(
-                     deployment_id=CFG.get_azure_deployment_id_for_model(model),
-                     model=model,
-                     messages=messages,
-                     temperature=temperature,
-                     max_tokens=max_tokens,
-                 )
-             else:
-                 response = openai.ChatCompletion.create(
-                     model=model,
-                     messages=messages,
-                     temperature=temperature,
-                     max_tokens=max_tokens,
-                 )
-             break
-         except RateLimitError:
-             if CFG.debug_mode:
-                 print(
-                     Fore.RED + "Error: ",
-                     f"Reached rate limit, passing..." + Fore.RESET,
-                 )
-             if not warned_user:
-                 logger.double_check(
-                     f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
-                     + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
-                 )
-                 warned_user = True
-         except APIError as e:
-             if e.http_status == 502:
-                 pass
-             else:
-                 raise
-             if attempt == num_retries - 1:
-                 raise
-         if CFG.debug_mode:
-             print(
-                 Fore.RED + "Error: ",
-                 f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
-             )
-         time.sleep(backoff)
-     if response is None:
-         logger.typewriter_log(
-             "FAILED TO GET RESPONSE FROM OPENAI",
-             Fore.RED,
-             "Auto-GPT has failed to get a response from OpenAI's services. "
-             + f"Try running Auto-GPT again, and if the problem persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
-         )
-         logger.double_check()
-         if CFG.debug_mode:
-             raise RuntimeError(f"Failed to get response after {num_retries} retries")
-         else:
-             quit(1)
-
-     return response.choices[0].message["content"]
-
-
- def create_embedding_with_ada(text) -> list:
-     """Create an embedding with text-ada-002 using the OpenAI SDK"""
-     num_retries = 10
-     for attempt in range(num_retries):
-         backoff = 2 ** (attempt + 2)
-         try:
-             if CFG.use_azure:
-                 return openai.Embedding.create(
-                     input=[text],
-                     engine=CFG.get_azure_deployment_id_for_model(
-                         "text-embedding-ada-002"
-                     ),
-                 )["data"][0]["embedding"]
-             else:
-                 return openai.Embedding.create(
-                     input=[text], model="text-embedding-ada-002"
-                 )["data"][0]["embedding"]
-         except RateLimitError:
-             pass
-         except APIError as e:
-             if e.http_status == 502:
-                 pass
-             else:
-                 raise
-             if attempt == num_retries - 1:
-                 raise
-         if CFG.debug_mode:
-             print(
-                 Fore.RED + "Error: ",
-                 f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
-             )
-         time.sleep(backoff)
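
Both helpers above retry with exponential backoff: attempt k sleeps 2**(k+2) seconds (4 s, 8 s, 16 s, ...). The same pattern, reduced to a generic sketch around any flaky call:

    import time

    def with_backoff(fn, num_retries=10):
        for attempt in range(num_retries):
            try:
                return fn()
            except Exception:
                if attempt == num_retries - 1:
                    raise  # give up after the final attempt
                time.sleep(2 ** (attempt + 2))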
 
spaces/Chilangosta/text-to-pokemon/app.py DELETED
@@ -1,204 +0,0 @@
- from contextlib import nullcontext
- import gradio as gr
- import torch
- from torch import autocast
- from diffusers import StableDiffusionPipeline
-
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- context = autocast if device == "cuda" else nullcontext
- dtype = torch.float16 if device == "cuda" else torch.float32
-
- pipe = StableDiffusionPipeline.from_pretrained("lambdalabs/sd-pokemon-diffusers", torch_dtype=dtype)
- pipe = pipe.to(device)
-
-
- # Sometimes the nsfw checker is confused by the Pokémon images, you can disable
- # it at your own risk here
- disable_safety = True
-
- if disable_safety:
-     def null_safety(images, **kwargs):
-         return images, False
-     pipe.safety_checker = null_safety
-
-
- def infer(prompt, n_samples, steps, scale):
-
-     with context("cuda"):
-         images = pipe(n_samples * [prompt], guidance_scale=scale, num_inference_steps=steps).images
-
-     return images
-
- css = """
- a {
-     color: inherit;
-     text-decoration: underline;
- }
- .gradio-container {
-     font-family: 'IBM Plex Sans', sans-serif;
- }
- .gr-button {
-     color: white;
-     border-color: #9d66e5;
-     background: #9d66e5;
- }
- input[type='range'] {
-     accent-color: #9d66e5;
- }
- .dark input[type='range'] {
-     accent-color: #dfdfdf;
- }
- .container {
-     max-width: 730px;
-     margin: auto;
-     padding-top: 1.5rem;
- }
- #gallery {
-     min-height: 22rem;
-     margin-bottom: 15px;
-     margin-left: auto;
-     margin-right: auto;
-     border-bottom-right-radius: .5rem !important;
-     border-bottom-left-radius: .5rem !important;
- }
- #gallery>div>.h-full {
-     min-height: 20rem;
- }
- .details:hover {
-     text-decoration: underline;
- }
- .gr-button {
-     white-space: nowrap;
- }
- .gr-button:focus {
-     border-color: rgb(147 197 253 / var(--tw-border-opacity));
-     outline: none;
-     box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
-     --tw-border-opacity: 1;
-     --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
-     --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
-     --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
-     --tw-ring-opacity: .5;
- }
- #advanced-options {
-     margin-bottom: 20px;
- }
- .footer {
-     margin-bottom: 45px;
-     margin-top: 35px;
-     text-align: center;
-     border-bottom: 1px solid #e5e5e5;
- }
- .footer>p {
-     font-size: .8rem;
-     display: inline-block;
-     padding: 0 10px;
-     transform: translateY(10px);
-     background: white;
- }
- .dark .logo{ filter: invert(1); }
- .dark .footer {
-     border-color: #303030;
- }
- .dark .footer>p {
-     background: #0b0f19;
- }
- .acknowledgments h4{
-     margin: 1.25em 0 .25em 0;
-     font-weight: bold;
-     font-size: 115%;
- }
- """
-
- block = gr.Blocks(css=css)
-
- examples = [
-     [
-         'Yoda',
-         2,
-         7.5,
-     ],
-     [
-         'Abraham Lincoln',
-         2,
-         7.5,
-     ],
-     [
-         'George Washington',
-         2,
-         7,
-     ],
- ]
-
- with block:
-     gr.HTML(
-         """
-         <div style="text-align: center; max-width: 650px; margin: 0 auto;">
-           <div>
-             <img class="logo" src="https://lambdalabs.com/hubfs/logos/lambda-logo.svg" alt="Lambda Logo"
-                  style="margin: auto; max-width: 7rem;">
-             <h1 style="font-weight: 900; font-size: 3rem;">
-               Pokémon text to image
-             </h1>
-           </div>
-           <p style="margin-bottom: 10px; font-size: 94%">
-             Generate new Pokémon from a text description,
-             <a href="https://lambdalabs.com/blog/how-to-fine-tune-stable-diffusion-how-we-made-the-text-to-pokemon-model-at-lambda/">created by Lambda Labs</a>.
-           </p>
-         </div>
-         """
-     )
-     with gr.Group():
-         with gr.Box():
-             with gr.Row().style(mobile_collapse=False, equal_height=True):
-                 text = gr.Textbox(
-                     label="Enter your prompt",
-                     show_label=False,
-                     max_lines=1,
-                     placeholder="Enter your prompt",
-                 ).style(
-                     border=(True, False, True, True),
-                     rounded=(True, False, False, True),
-                     container=False,
-                 )
-                 btn = gr.Button("Generate image").style(
-                     margin=False,
-                     rounded=(False, True, True, False),
-                 )
-
-         gallery = gr.Gallery(
-             label="Generated images", show_label=False, elem_id="gallery"
-         ).style(grid=[2], height="auto")
-
-         with gr.Row(elem_id="advanced-options"):
-             samples = gr.Slider(label="Images", minimum=1, maximum=4, value=2, step=1)
-             steps = gr.Slider(label="Steps", minimum=5, maximum=50, value=25, step=5)
-             scale = gr.Slider(
-                 label="Guidance Scale", minimum=0, maximum=50, value=7.5, step=0.1
-             )
-
-     ex = gr.Examples(examples=examples, fn=infer, inputs=[text, samples, scale], outputs=gallery, cache_examples=False)
-     ex.dataset.headers = [""]
-
-     text.submit(infer, inputs=[text, samples, steps, scale], outputs=gallery)
-     btn.click(infer, inputs=[text, samples, steps, scale], outputs=gallery)
-     gr.HTML(
-         """
-         <div class="footer">
-             <p> Gradio Demo by 🤗 Hugging Face and Lambda Labs
-             </p>
-         </div>
-         <div class="acknowledgments">
-             <p> Put in a text prompt and generate your own Pokémon character, no "prompt engineering" required!
-             <p>If you want to find out how we made this model read about it in <a href="https://lambdalabs.com/blog/how-to-fine-tune-stable-diffusion-how-we-made-the-text-to-pokemon-model-at-lambda/">this blog post</a>.
-             <p>And if you want to train your own Stable Diffusion variants, see our <a href="https://github.com/LambdaLabsML/examples/tree/main/stable-diffusion-finetuning">Examples Repo</a>!
-             <p>Trained by <a href="justinpinkney.com">Justin Pinkney</a> (<a href="https://twitter.com/Buntworthy">@Buntworthy</a>) at <a href="https://lambdalabs.com/">Lambda Labs</a>.</p>
-         </div>
-         """
-     )
-
- block.launch()
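
Outside the Gradio UI, infer above can be called directly; it returns a list of PIL images from the fine-tuned pipeline (the prompt here is illustrative):

    images = infer("a cute dragon-type pokemon", n_samples=1, steps=25, scale=7.5)
    images[0].save("pokemon.png")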
 
spaces/Cletrason/Cletrason-toad-in-the-mario-movie/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Cletrason Toad In The Mario Movie
- emoji: 💩
- colorFrom: blue
- colorTo: purple
- sdk: gradio
- sdk_version: 3.24.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DHEIVER/CoronaryAngioSegment/detect_anomalies.py DELETED
@@ -1,13 +0,0 @@
- import numpy as np
- import cv2
-
- def detect_anomalies(mask):
-     # Apply a threshold to the mask to identify anomaly regions
-     threshold = 0.5  # adjust the threshold as needed
-     anomalies = (mask > threshold).astype(np.uint8)
-
-     # Apply post-processing such as erosion and dilation if needed
-     # anomalies = cv2.erode(anomalies, kernel, iterations=1)
-     # anomalies = cv2.dilate(anomalies, kernel, iterations=1)
-
-     return anomalies
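
detect_anomalies binarizes a per-pixel probability map at 0.5. A usage sketch on a dummy mask (a real mask would come from the segmentation model):

    import numpy as np

    mask = np.random.rand(256, 256).astype(np.float32)
    binary = detect_anomalies(mask)
    print(binary.dtype, int(binary.sum()), "pixels flagged")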
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/ttCollection.py DELETED
@@ -1,127 +0,0 @@
- from fontTools.ttLib.ttFont import TTFont
- from fontTools.ttLib.sfnt import readTTCHeader, writeTTCHeader
- from io import BytesIO
- import struct
- import logging
-
- log = logging.getLogger(__name__)
-
-
- class TTCollection(object):
-
-     """Object representing a TrueType Collection / OpenType Collection.
-     The main API is self.fonts being a list of TTFont instances.
-
-     If shareTables is True, then different fonts in the collection
-     might point to the same table object if the data for the table was
-     the same in the font file. Note, however, that this might result
-     in surprises and incorrect behavior if the different fonts involved
-     have different GlyphOrder. Use only if you know what you are doing.
-     """
-
-     def __init__(self, file=None, shareTables=False, **kwargs):
-         fonts = self.fonts = []
-         if file is None:
-             return
-
-         assert "fontNumber" not in kwargs, kwargs
-
-         closeStream = False
-         if not hasattr(file, "read"):
-             file = open(file, "rb")
-             closeStream = True
-
-         tableCache = {} if shareTables else None
-
-         header = readTTCHeader(file)
-         for i in range(header.numFonts):
-             font = TTFont(file, fontNumber=i, _tableCache=tableCache, **kwargs)
-             fonts.append(font)
-
-         # don't close file if lazy=True, as the TTFont hold a reference to the original
-         # file; the file will be closed once the TTFonts are closed in the
-         # TTCollection.close(). We still want to close the file if lazy is None or
-         # False, because in that case the TTFont no longer need the original file
-         # and we want to avoid 'ResourceWarning: unclosed file'.
-         if not kwargs.get("lazy") and closeStream:
-             file.close()
-
-     def __enter__(self):
-         return self
-
-     def __exit__(self, type, value, traceback):
-         self.close()
-
-     def close(self):
-         for font in self.fonts:
-             font.close()
-
-     def save(self, file, shareTables=True):
-         """Save the font to disk. Similarly to the constructor,
-         the 'file' argument can be either a pathname or a writable
-         file object.
-         """
-         if not hasattr(file, "write"):
-             final = None
-             file = open(file, "wb")
-         else:
-             # assume "file" is a writable file object
-             # write to a temporary stream to allow saving to unseekable streams
-             final = file
-             file = BytesIO()
-
-         tableCache = {} if shareTables else None
-
-         offsets_offset = writeTTCHeader(file, len(self.fonts))
-         offsets = []
-         for font in self.fonts:
-             offsets.append(file.tell())
-             font._save(file, tableCache=tableCache)
-             file.seek(0, 2)
-
-         file.seek(offsets_offset)
-         file.write(struct.pack(">%dL" % len(self.fonts), *offsets))
-
-         if final:
-             final.write(file.getvalue())
-         file.close()
-
-     def saveXML(self, fileOrPath, newlinestr="\n", writeVersion=True, **kwargs):
-
-         from fontTools.misc import xmlWriter
-
-         writer = xmlWriter.XMLWriter(fileOrPath, newlinestr=newlinestr)
-
-         if writeVersion:
-             from fontTools import version
-
-             version = ".".join(version.split(".")[:2])
-             writer.begintag("ttCollection", ttLibVersion=version)
-         else:
-             writer.begintag("ttCollection")
-         writer.newline()
-         writer.newline()
-
-         for font in self.fonts:
-             font._saveXML(writer, writeVersion=False, **kwargs)
-             writer.newline()
-
-         writer.endtag("ttCollection")
-         writer.newline()
-
-         writer.close()
-
-     def __getitem__(self, item):
-         return self.fonts[item]
-
-     def __setitem__(self, item, value):
-         self.fonts[item] = value
-
-     def __delitem__(self, item):
-         return self.fonts[item]
-
-     def __len__(self):
-         return len(self.fonts)
-
-     def __iter__(self):
-         return iter(self.fonts)
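
TTCollection behaves like a list of TTFont objects and doubles as a context manager. A usage sketch for listing the families inside a .ttc file (the path is illustrative):

    from fontTools.ttLib.ttCollection import TTCollection

    with TTCollection("fonts/SomeFamily.ttc", lazy=True) as ttc:
        print(len(ttc), "fonts in collection")
        for font in ttc:
            print(font["name"].getDebugName(1))  # nameID 1 = family name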
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/BlockTitle-dee077e8.js DELETED
@@ -1,2 +0,0 @@
- import{S as h,e as k,s as g,a9 as w,N as $,O as B,m as I,K as d,U as _,p as c,ab as N,ac as S,ad as j,z as r,u as q,v as m,y as v,A as p,k as z,o as A,x as C,P as K,R as O}from"./index-1d65707a.js";import{I as P}from"./Info-7c6961ef.js";import"./Button-f155035a.js";function b(a){let e,l;return e=new P({props:{$$slots:{default:[R]},$$scope:{ctx:a}}}),{c(){z(e.$$.fragment)},m(n,o){A(e,n,o),l=!0},p(n,o){const u={};o&10&&(u.$$scope={dirty:o,ctx:n}),e.$set(u)},i(n){l||(r(e.$$.fragment,n),l=!0)},o(n){m(e.$$.fragment,n),l=!1},d(n){C(e,n)}}}function R(a){let e;return{c(){e=K(a[1])},m(l,n){c(l,e,n)},p(l,n){n&2&&O(e,l[1])},d(l){l&&p(e)}}}function T(a){let e,l,n,o;const u=a[2].default,f=w(u,a,a[3],null);let s=a[1]&&b(a);return{c(){e=$("span"),f&&f.c(),l=B(),s&&s.c(),n=I(),d(e,"data-testid","block-info"),d(e,"class","svelte-1gfkn6j"),_(e,"sr-only",!a[0]),_(e,"hide",!a[0]),_(e,"has-info",a[1]!=null)},m(t,i){c(t,e,i),f&&f.m(e,null),c(t,l,i),s&&s.m(t,i),c(t,n,i),o=!0},p(t,[i]){f&&f.p&&(!o||i&8)&&N(f,u,t,t[3],o?j(u,t[3],i,null):S(t[3]),null),(!o||i&1)&&_(e,"sr-only",!t[0]),(!o||i&1)&&_(e,"hide",!t[0]),(!o||i&2)&&_(e,"has-info",t[1]!=null),t[1]?s?(s.p(t,i),i&2&&r(s,1)):(s=b(t),s.c(),r(s,1),s.m(n.parentNode,n)):s&&(q(),m(s,1,1,()=>{s=null}),v())},i(t){o||(r(f,t),r(s),o=!0)},o(t){m(f,t),m(s),o=!1},d(t){t&&(p(e),p(l),p(n)),f&&f.d(t),s&&s.d(t)}}}function U(a,e,l){let{$$slots:n={},$$scope:o}=e,{show_label:u=!0}=e,{info:f=void 0}=e;return a.$$set=s=>{"show_label"in s&&l(0,u=s.show_label),"info"in s&&l(1,f=s.info),"$$scope"in s&&l(3,o=s.$$scope)},[u,f,n,o]}class G extends h{constructor(e){super(),k(this,e,U,T,g,{show_label:0,info:1})}}export{G as B};
- //# sourceMappingURL=BlockTitle-dee077e8.js.map
 
spaces/DataScienceEngineering/6-TreemapAndSunburst/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: 🧠Visualization Plotly Sunbursts Treemaps WebGL🩺
- emoji: 6-Vis🧠
- colorFrom: indigo
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Datatrooper/boston_housing/app.py DELETED
@@ -1,85 +0,0 @@
- import numpy as np
- import pandas as pd
- import matplotlib.pyplot as plt
- import gradio as gr
- #import joblib
- from sklearn.linear_model import LinearRegression
- from sklearn.tree import DecisionTreeRegressor
- from sklearn.ensemble import RandomForestRegressor
- from sklearn.model_selection import StratifiedShuffleSplit
- from sklearn.impute import SimpleImputer
- from sklearn.pipeline import Pipeline
- from sklearn.compose import ColumnTransformer
- from sklearn.preprocessing import StandardScaler
- from sklearn.preprocessing import OneHotEncoder
- from sklearn.metrics import mean_squared_error
- from sklearn.model_selection import cross_val_score
- from sklearn.model_selection import RandomizedSearchCV
- from sklearn.preprocessing import MinMaxScaler
- from sklearn.model_selection import train_test_split
-
- df = pd.read_csv('Housing.csv')
- cat_columns = ['mainroad',
-                'guestroom', 'basement', 'hotwaterheating', 'airconditioning',
-                'prefarea']
-
- def binary_mapping(x):
-     return x.map({'yes': 1, "no": 0})
-
- df[cat_columns] = df[cat_columns].apply(binary_mapping)
-
- ohe = OneHotEncoder(sparse=False, handle_unknown='error', drop='first')
- ohe_df = pd.DataFrame(ohe.fit_transform(df[['furnishingstatus']]))
-
- ohe_df.columns = ohe.get_feature_names(['status'])
-
- df = pd.concat([df, ohe_df], axis=1)
- df.drop(['furnishingstatus'], axis=1, inplace=True)
- df.head()
-
- df_new = df.copy(deep=True)
- num_columns = ['area', 'bedrooms', 'bathrooms', 'stories', 'parking']
-
- scaler = MinMaxScaler().fit(df_new[num_columns])
- df_new[num_columns] = scaler.transform(df_new[num_columns])
- y = df_new.pop('price')
- x = df_new
- x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=42)
- model = RandomForestRegressor()
- model.fit(x_train, y_train)
-
- def prediction(properties):
-     print(properties)
-     df = pd.DataFrame(properties, columns=x_test.columns)
-     print(df)
-     df = df[x_test.columns].iloc[0].to_frame().T
-     df[num_columns] = scaler.transform(df[num_columns])
-     return model.predict(df)
-
- example = pd.DataFrame([7420, 4, 2, 3, 1, 0, 0, 0, 1, 2, 1, 0, 0]).T
- example.columns = x_test.columns
-
- demo = gr.Interface(
-     prediction,
-     [
-         gr.Dataframe(
-             headers=['area', 'bedrooms', 'bathrooms', 'stories', 'mainroad', 'guestroom',
-                      'basement', 'hotwaterheating', 'airconditioning', 'parking', 'prefarea',
-                      'status_semi-furnished', 'status_unfurnished'],
-             datatype=["number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number", "number"],
-         )
-     ],
-     "number",
-     description="Enter The Properties Of The Home",
-     title="California Housing Prices Prediction",
-     examples=[example],
- )
-
- demo.launch()
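
prediction above expects one row with the 13 engineered columns in x_test order (the five numeric columns are re-scaled internally). A quick smoke test mirroring the example row in the code:

    row = [[7420, 4, 2, 3, 1, 0, 0, 0, 1, 2, 1, 0, 0]]
    print(prediction(row))  # one predicted price, e.g. array([...])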
 
spaces/DiamondYin/AnewGame/Build/WaliwebGLgameFPS.loader.js DELETED
@@ -1 +0,0 @@
- function createUnityInstance(t,n,c){function s(e,t){if(!s.aborted&&n.showBanner)return"error"==t&&(s.aborted=!0),n.showBanner(e,t);switch(t){case"error":console.error(e);break;case"warning":console.warn(e);break;default:console.log(e)}}function r(e){var t=e.reason||e.error,n=t?t.toString():e.message||e.reason||"",r=t&&t.stack?t.stack.toString():"";(n+="\n"+(r=r.startsWith(n)?r.substring(n.length):r).trim())&&l.stackTraceRegExp&&l.stackTraceRegExp.test(n)&&D(n,e.filename||t&&(t.fileName||t.sourceURL)||"",e.lineno||t&&(t.lineNumber||t.line)||0)}function e(e,t,n){var r=e[t];void 0!==r&&r||(console.warn('Config option "'+t+'" is missing or empty. Falling back to default value: "'+n+'". Consider updating your WebGL template to include the missing config option.'),e[t]=n)}c=c||function(){};var o,l={canvas:t,webglContextAttributes:{preserveDrawingBuffer:!1,powerPreference:2},cacheControl:function(e){return e==l.dataUrl||e.match(/\.bundle/)?"must-revalidate":"no-store"},streamingAssetsUrl:"StreamingAssets",downloadProgress:{},deinitializers:[],intervals:{},setInterval:function(e,t){e=window.setInterval(e,t);return this.intervals[e]=!0,e},clearInterval:function(e){delete this.intervals[e],window.clearInterval(e)},preRun:[],postRun:[],print:function(e){console.log(e)},printErr:function(e){console.error(e),"string"==typeof e&&-1!=e.indexOf("wasm streaming compile failed")&&(-1!=e.toLowerCase().indexOf("mime")?s('HTTP Response Header "Content-Type" configured incorrectly on the server for file '+l.codeUrl+' , should be "application/wasm". Startup time performance will suffer.',"warning"):s('WebAssembly streaming compilation failed! This can happen for example if "Content-Encoding" HTTP header is incorrectly enabled on the server for file '+l.codeUrl+", but the file is not pre-compressed on disk (or vice versa). 
Check the Network tab in browser Devtools to debug server header configuration.","warning"))},locateFile:function(e){return"build.wasm"==e?this.codeUrl:e},disabledCanvasEvents:["contextmenu","dragstart"]};for(o in e(n,"companyName","Unity"),e(n,"productName","WebGL Player"),e(n,"productVersion","1.0"),n)l[o]=n[o];l.streamingAssetsUrl=new URL(l.streamingAssetsUrl,document.URL).href;var a=l.disabledCanvasEvents.slice();function i(e){e.preventDefault()}a.forEach(function(e){t.addEventListener(e,i)}),window.addEventListener("error",r),window.addEventListener("unhandledrejection",r);var u="",d="";function h(e){document.webkitCurrentFullScreenElement===t?t.style.width&&(u=t.style.width,d=t.style.height,t.style.width="100%",t.style.height="100%"):u&&(t.style.width=u,t.style.height=d,d=u="")}document.addEventListener("webkitfullscreenchange",h),l.deinitializers.push(function(){for(var e in l.disableAccessToMediaDevices(),a.forEach(function(e){t.removeEventListener(e,i)}),window.removeEventListener("error",r),window.removeEventListener("unhandledrejection",r),document.removeEventListener("webkitfullscreenchange",h),l.intervals)window.clearInterval(e);l.intervals={}}),l.QuitCleanup=function(){for(var e=0;e<l.deinitializers.length;e++)l.deinitializers[e]();l.deinitializers=[],"function"==typeof l.onQuit&&l.onQuit()};var f,p,m,g,b,v,w,y,S,C={Module:l,SetFullscreen:function(){if(l.SetFullscreen)return l.SetFullscreen.apply(l,arguments);l.print("Failed to set Fullscreen mode: Player not loaded yet.")},SendMessage:function(){if(l.SendMessage)return l.SendMessage.apply(l,arguments);l.print("Failed to execute SendMessage: Player not loaded yet.")},Quit:function(){return new Promise(function(e,t){l.shouldQuit=!0,l.onQuit=e})},GetMemoryInfo:function(){var e=l._getMemInfo();return{totalWASMHeapSize:l.HEAPU32[e>>2],usedWASMHeapSize:l.HEAPU32[1+(e>>2)],totalJSHeapSize:l.HEAPF64[1+(e>>3)],usedJSHeapSize:l.HEAPF64[2+(e>>3)]}}};function D(e,t,n){-1==e.indexOf("fullscreen error")&&(l.startupErrorHandler?l.startupErrorHandler(e,t,n):l.errorHandler&&l.errorHandler(e,t,n)||(console.log("Invoking error handler due to\n"+e),"function"==typeof dump&&dump("Invoking error handler due to\n"+e),D.didShowErrorMessage||(-1!=(e="An error occurred running the Unity content on this page. See your browser JavaScript console for more info. The error was:\n"+e).indexOf("DISABLE_EXCEPTION_CATCHING")?e="An exception has occurred, but exception handling has been disabled in this build. If you are the developer of this content, enable exceptions in your project WebGL player settings to be able to catch the exception or see the stack trace.":-1!=e.indexOf("Cannot enlarge memory arrays")?e="Out of memory. If you are the developer of this content, try allocating more memory to your WebGL build in the WebGL player settings.":-1==e.indexOf("Invalid array buffer length")&&-1==e.indexOf("Invalid typed array length")&&-1==e.indexOf("out of memory")&&-1==e.indexOf("could not allocate memory")||(e="The browser could not allocate enough memory for the WebGL content. If you are the developer of this content, try allocating less memory to your WebGL build in the WebGL player settings."),alert(e),D.didShowErrorMessage=!0)))}function P(e,t){if("symbolsUrl"!=e){var n=l.downloadProgress[e],r=(n=n||(l.downloadProgress[e]={started:!1,finished:!1,lengthComputable:!1,total:0,loaded:0}),"object"!=typeof t||"progress"!=t.type&&"load"!=t.type||(n.started||(n.started=!0,n.lengthComputable=t.lengthComputable),n.total=t.total,n.loaded=t.loaded,"load"==t.type&&(n.finished=!0)),0),o=0,a=0,i=0,s=0;for(e in l.downloadProgress){if(!(n=l.downloadProgress[e]).started)return;a++,n.lengthComputable?(r+=n.loaded,o+=n.total,i++):n.finished||s++}c(.9*(a?(a-s-(o?i*(o-r)/o:0))/a:0))}}function x(){var e=this;this.isConnected=this.connect().then(function(){return e.cleanUpCache()}),this.isConnected.catch(function(e){e="Error when initializing cache: "+e,console.log("[UnityCache] "+e)})}function E(e){console.log("[UnityCache] "+e)}function U(e){return U.link=U.link||document.createElement("a"),U.link.href=e,U.link.href}function T(){new Promise(function(a,e){var i=document.createElement("script");i.src=l.frameworkUrl,i.onload=function(){if("undefined"==typeof unityFramework||!unityFramework){var e,t=[["br","br"],["gz","gzip"]];for(e in t){var n,r=t[e];if(l.frameworkUrl.endsWith("."+r[0]))return n="Unable to parse "+l.frameworkUrl+"!","file:"==location.protocol?void s(n+" Loading pre-compressed (brotli or gzip) content via a file:// URL without a web server is not supported by this browser. Please use a local development web server to host compressed Unity content, or use the Unity Build and Run option.","error"):(n+=' This can happen if build compression was enabled but web server hosting the content was misconfigured to not serve the file with HTTP Response Header "Content-Encoding: '+r[1]+'" present. Check browser Console and Devtools Network tab to debug.',"br"==r[0]&&"http:"==location.protocol&&(r=-1!=["localhost","127.0.0.1"].indexOf(location.hostname)?"":"Migrate your server to use HTTPS.",n=/Firefox/.test(navigator.userAgent)?"Unable to parse "+l.frameworkUrl+'!<br>If using custom web server, verify that web server is sending .br files with HTTP Response Header "Content-Encoding: br". Brotli compression may not be supported in Firefox over HTTP connections. '+r+' See <a href="https://bugzilla.mozilla.org/show_bug.cgi?id=1670675">https://bugzilla.mozilla.org/show_bug.cgi?id=1670675</a> for more information.':"Unable to parse "+l.frameworkUrl+'!<br>If using custom web server, verify that web server is sending .br files with HTTP Response Header "Content-Encoding: br". Brotli compression may not be supported over HTTP connections. Migrate your server to use HTTPS.'),void s(n,"error"))}s("Unable to parse "+l.frameworkUrl+"! The file is corrupt, or compression was misconfigured? (check Content-Encoding HTTP Response Header on web server)","error")}var o=unityFramework;unityFramework=null,i.onload=null,a(o)},i.onerror=function(e){s("Unable to load file "+l.frameworkUrl+"! Check that the file exists on the remote server. (also check browser Console and Devtools Network tab to debug)","error")},document.body.appendChild(i),l.deinitializers.push(function(){document.body.removeChild(i)})}).then(function(e){e(l)});P(n="dataUrl"),e=l.cacheControl(l[n]),t=l.companyName&&l.productName?l.cachedFetch:l.fetchWithProgress,r=l[n],r=/file:\/\//.exec(r)?"same-origin":void 0;var n,e,t,r,o=t(l[n],{method:"GET",companyName:l.companyName,productName:l.productName,productVersion:l.productVersion,control:e,mode:r,onProgress:function(e){P(n,e)}}).then(function(e){return e.parsedBody}).catch(function(e){var t="Failed to download file "+l[n];"file:"==location.protocol?s(t+". Loading web pages via a file:// URL without a web server is not supported by this browser. Please use a local development web server to host Unity content, or use the Unity Build and Run option.","error"):console.error(t)});l.preRun.push(function(){l.addRunDependency("dataUrl"),o.then(function(e){var t=new DataView(e.buffer,e.byteOffset,e.byteLength),n=0,r="UnityWebData1.0\0";if(!String.fromCharCode.apply(null,e.subarray(n,n+r.length))==r)throw"unknown data format";var o=t.getUint32(n+=r.length,!0);for(n+=4;n<o;){var a=t.getUint32(n,!0),i=(n+=4,t.getUint32(n,!0)),s=(n+=4,t.getUint32(n,!0)),c=(n+=4,String.fromCharCode.apply(null,e.subarray(n,n+s)));n+=s;for(var u=0,d=c.indexOf("/",u)+1;0<d;u=d,d=c.indexOf("/",u)+1)l.FS_createPath(c.substring(0,u),c.substring(u,d-1),!0,!0);l.FS_createDataFile(c,null,e.subarray(a,a+i),!0,!0,!0)}l.removeRunDependency("dataUrl")})})}return l.SystemInfo=function(){var e,t,n,r,o=navigator.userAgent+" ",a=[["Firefox","Firefox"],["OPR","Opera"],["Edg","Edge"],["SamsungBrowser","Samsung Browser"],["Trident","Internet Explorer"],["MSIE","Internet Explorer"],["Chrome","Chrome"],["CriOS","Chrome on iOS Safari"],["FxiOS","Firefox on iOS Safari"],["Safari","Safari"]];function i(e,t,n){return(e=RegExp(e,"i").exec(t))&&e[n]}for(var s=0;s<a.length;++s)if(t=i(a[s][0]+"[/ ](.*?)[ \\)]",o,1)){e=a[s][1];break}"Safari"==e&&(t=i("Version/(.*?) ",o,1)),"Internet Explorer"==e&&(t=i("rv:(.*?)\\)? ",o,1)||t);for(var c=[["Windows (.*?)[;)]","Windows"],["Android ([0-9_.]+)","Android"],["iPhone OS ([0-9_.]+)","iPhoneOS"],["iPad.*? OS ([0-9_.]+)","iPadOS"],["FreeBSD( )","FreeBSD"],["OpenBSD( )","OpenBSD"],["Linux|X11()","Linux"],["Mac OS X ([0-9_\\.]+)","MacOS"],["bot|google|baidu|bing|msn|teoma|slurp|yandex","Search Bot"]],u=0;u<c.length;++u)if(d=i(c[u][0],o,1)){n=c[u][1],d=d.replace(/_/g,".");break}var d={"NT 5.0":"2000","NT 5.1":"XP","NT 5.2":"Server 2003","NT 6.0":"Vista","NT 6.1":"7","NT 6.2":"8","NT 6.3":"8.1","NT 10.0":"10"}[d]||d,l=((l=document.createElement("canvas"))&&(gl=l.getContext("webgl2"),glVersion=gl?2:0,gl||(gl=l&&l.getContext("webgl"))&&(glVersion=1),gl&&(r=gl.getExtension("WEBGL_debug_renderer_info")&&gl.getParameter(37446)||gl.getParameter(7937))),"undefined"!=typeof SharedArrayBuffer),h="object"==typeof WebAssembly&&"function"==typeof WebAssembly.compile;return{width:screen.width,height:screen.height,userAgent:o.trim(),browser:e||"Unknown browser",browserVersion:t||"Unknown version",mobile:/Mobile|Android|iP(ad|hone)/.test(navigator.appVersion),os:n||"Unknown OS",osVersion:d||"Unknown OS Version",gpu:r||"Unknown GPU",language:navigator.userLanguage||navigator.language,hasWebGL:glVersion,hasCursorLock:!!document.body.requestPointerLock,hasFullscreen:!!document.body.requestFullscreen||!!document.body.webkitRequestFullscreen,hasThreads:l,hasWasm:h,hasWasmThreads:!1}}(),l.abortHandler=function(e){return D(e,"",0),!0},Error.stackTraceLimit=Math.max(Error.stackTraceLimit||0,50),l.readBodyWithProgress=function(a,i,s){var e=a.body?a.body.getReader():void 0,c=void 0!==a.headers.get("Content-Length"),u=function(e,t){if(!t)return 0;var t=e.headers.get("Content-Encoding"),n=parseInt(e.headers.get("Content-Length"));switch(t){case"br":return Math.round(5*n);case"gzip":return Math.round(4*n);default:return n}}(a,c),d=new Uint8Array(u),l=[],h=0,f=0;return c||console.warn("[UnityCache] Response is served without Content-Length header. Please reconfigure server to include valid Content-Length for better download performance."),function o(){return void 0===e?a.arrayBuffer().then(function(e){var t=new Uint8Array(e);return i({type:"progress",response:a,total:e.length,loaded:0,lengthComputable:c,chunk:s?t:null}),t}):e.read().then(function(e){if(e.done){if(h===u)return d;if(h<u)return d.slice(0,h);for(var t=new Uint8Array(h),n=(t.set(d,0),f),r=0;r<l.length;++r)t.set(l[r],n),n+=l[r].length;return t}return h+e.value.length<=d.length?(d.set(e.value,h),f=h+e.value.length):l.push(e.value),h+=e.value.length,i({type:"progress",response:a,total:Math.max(u,h),loaded:h,lengthComputable:c,chunk:s?e.value:null}),o()})}().then(function(e){return i({type:"load",response:a,total:e.length,loaded:e.length,lengthComputable:c,chunk:null}),a.parsedBody=e,a})},l.fetchWithProgress=function(e,t){var n=function(){};return t&&t.onProgress&&(n=t.onProgress),fetch(e,t).then(function(e){return l.readBodyWithProgress(e,n,t.enableStreamingDownload)})},l.UnityCache=(f={name:"UnityCache",version:4},p={name:"RequestMetaDataStore",version:1},m="RequestStore",g="WebAssembly",b=window.indexedDB||window.mozIndexedDB||window.webkitIndexedDB||window.msIndexedDB,v=null,x.getInstance=function(){return v=v||new x},x.destroyInstance=function(){return v?v.close().then(function(){v=null}):Promise.resolve()},x.prototype.clearCache=function(){var r=this;return this.isConnected.then(function(){return r.execute(p.name,"clear",[])}).then(function(){return r.cache.keys()}).then(function e(t){var n;return 0===t.length?Promise.resolve():(n=t.pop(),r.cache.delete(n).then(function(){return e(t)}))})},x.UnityCacheDatabase=f,x.RequestMetaDataStore=p,x.MaximumCacheSize=1073741824,x.prototype.loadRequest=function(e){var t=this;return t.isConnected.then(function(){return Promise.all([t.cache.match(e),t.loadRequestMetaData(e)])}).then(function(e){if(void 0!==e[0]&&void 0!==e[1])return{response:e[0],metaData:e[1]}})},x.prototype.loadRequestMetaData=function(e){e="string"==typeof e?e:e.url;return this.execute(p.name,"get",[e])},x.prototype.updateRequestMetaData=function(e){return this.execute(p.name,"put",[e])},x.prototype.storeRequest=function(e,t){var n=this;return n.isConnected.then(function(){return n.cache.put(e,t)})},x.prototype.close=function(){return this.isConnected.then(function(){this.database&&(this.database.close(),this.database=null),this.cache&&(this.cache=null)}.bind(this))},x.prototype.connect=function(){var o=this;return void 0===b?Promise.reject(new Error("Could not connect to cache: IndexedDB is not supported.")):void 0===window.caches?Promise.reject(new Error("Could not connect to cache: Cache API is not supported.")):new Promise(function(t,n){try{function r(){o.openDBTimeout&&(clearTimeout(o.openDBTimeout),o.openDBTimeout=null)}o.openDBTimeout=setTimeout(function(){void 0===o.database&&n(new Error("Could not connect to cache: Database timeout."))},2e4);var e=b.open(f.name,f.version);e.onupgradeneeded=o.upgradeDatabase.bind(o),e.onsuccess=function(e){r(),o.database=e.target.result,t()},e.onerror=function(e){r(),o.database=null,n(new Error("Could not connect to database."))}}catch(e){r(),o.database=null,o.cache=null,n(new Error("Could not connect to cache: Could not connect to database."))}}).then(function(){var e=f.name+"_"+l.companyName+"_"+l.productName;return caches.open(e)}).then(function(e){o.cache=e})},x.prototype.upgradeDatabase=function(e){var t,e=e.target.result;e.objectStoreNames.contains(p.name)||(t=e.createObjectStore(p.name,{keyPath:"url"}),["accessedAt","updatedAt"].forEach(function(e){t.createIndex(e,e)})),e.objectStoreNames.contains(m)&&e.deleteObjectStore(m),e.objectStoreNames.contains(g)&&e.deleteObjectStore(g)},x.prototype.execute=function(a,i,s){return this.isConnected.then(function(){return new Promise(function(t,n){try{var e,r,o;null===this.database?n(new Error("indexedDB access denied")):(e=-1!=["put","delete","clear"].indexOf(i)?"readwrite":"readonly",r=this.database.transaction([a],e).objectStore(a),"openKeyCursor"==i&&(r=r.index(s[0]),s=s.slice(1)),(o=r[i].apply(r,s)).onsuccess=function(e){t(e.target.result)},o.onerror=function(e){n(e)})}catch(e){n(e)}}.bind(this))}.bind(this))},x.prototype.getMetaDataEntries=function(){var r=this,o=0,a=[];return new Promise(function(t,n){var e=r.database.transaction([p.name],"readonly").objectStore(p.name).openCursor();e.onsuccess=function(e){e=e.target.result;e?(o+=e.value.size,a.push(e.value),e.continue()):t({metaDataEntries:a,cacheSize:o})},e.onerror=function(e){n(e)}})},x.prototype.cleanUpCache=function(){var i=this;return this.getMetaDataEntries().then(function(e){for(var t=e.metaDataEntries,n=e.cacheSize,r=[],o=[],a=0;a<t.length;++a)t[a].version==l.productVersion?o.push(t[a]):(r.push(t[a]),n-=t[a].size);o.sort(function(e,t){return e.accessedAt-t.accessedAt});for(a=0;a<o.length&&!(n<x.MaximumCacheSize);++a)r.push(o[a]),n-=o[a].size;return function e(){var t;return 0===r.length?Promise.resolve():(t=r.pop(),i.cache.delete(t.url).then(function(e){if(e)return r=t.url,new Promise(function(e,t){var n=i.database.transaction([p.name],"readwrite");n.objectStore(p.name).delete(r),n.oncomplete=e,n.onerror=t});var r}).then(e))}()})},x),l.cachedFetch=(w=l.UnityCache,y=l.fetchWithProgress,S=l.readBodyWithProgress,function(o,a){var e,t,i=w.getInstance(),s=U("string"==typeof o?o:o.url),c={enabled:(e=s,(!(t=a)||!t.method||"GET"===t.method)&&((!t||-1!=["must-revalidate","immutable"].indexOf(t.control))&&!!e.match("^https?://")))};function u(n,r){return fetch(n,r).then(function(e){var t;return!c.enabled||c.revalidated?e:304===e.status?(c.revalidated=!0,i.updateRequestMetaData(c.metaData).then(function(){E("'"+c.metaData.url+"' successfully revalidated and served from the indexedDB cache")}).catch(function(e){E("'"+c.metaData.url+"' successfully revalidated but not stored in the indexedDB cache due to the error: "+e)}),S(c.response,r.onProgress,r.enableStreamingDownload)):200==e.status?(c.response=e,c.metaData.updatedAt=c.metaData.accessedAt,c.revalidated=!0,t=e.clone(),S(e,r.onProgress,r.enableStreamingDownload).then(function(e){return c.metaData.size=e.parsedBody.length,Promise.all([i.storeRequest(n,t),i.updateRequestMetaData(c.metaData)]).then(function(){E("'"+s+"' successfully downloaded and stored in the indexedDB cache")}).catch(function(e){E("'"+s+"' successfully downloaded but not stored in the indexedDB cache due to the error: "+e)}),e})):(E("'"+s+"' request failed with status: "+e.status+" "+e.statusText),S(e,r.onProgress,r.enableStreamingDownload))})}return a&&(c.control=a.control,c.companyName=a.companyName,c.productName=a.productName,c.productVersion=a.productVersion),c.revalidated=!1,c.metaData={url:s,accessedAt:Date.now(),version:c.productVersion},c.response=null,c.enabled?i.loadRequest(s).then(function(e){var n,r,t;return e?(n=e.response,r=e.metaData,c.response=n,c.metaData.size=r.size,c.metaData.updatedAt=r.updatedAt,"immutable"==c.control?(c.revalidated=!0,i.updateRequestMetaData(r).then(function(){E("'"+c.metaData.url+"' served from the indexedDB cache without revalidation")}),S(n,a.onProgress,a.enableStreamingDownload)):(e=s,(t=window.location.href.match(/^[a-z]+:\/\/[^\/]+/))&&!e.lastIndexOf(t[0],0)||!n.headers.get("Last-Modified")&&!n.headers.get("ETag")?(e=(a=a||{}).headers||{},a.headers=e,n.headers.get("Last-Modified")?(e["If-Modified-Since"]=n.headers.get("Last-Modified"),e["Cache-Control"]="no-cache"):n.headers.get("ETag")&&(e["If-None-Match"]=n.headers.get("ETag"),e["Cache-Control"]="no-cache"),u(o,a)):fetch(s,{method:"HEAD"}).then(function(t){return c.revalidated=["Last-Modified","ETag"].every(function(e){return!n.headers.get(e)||n.headers.get(e)==t.headers.get(e)}),c.revalidated?(i.updateRequestMetaData(r).then(function(){E("'"+c.metaData.url+"' successfully revalidated and served from the indexedDB cache")}),S(c.response,a.onProgress,a.enableStreamingDownload)):u(o,a)}))):u(o,a)}).catch(function(e){return E("Failed to load '"+c.metaData.url+"' from indexedDB cache due to the error: "+e),y(o,a)}):y(o,a)}),new Promise(function(e,t){var n;l.SystemInfo.hasWebGL?1==l.SystemInfo.hasWebGL?(n='Your browser does not support graphics API "WebGL 2" which is required for this content.',"Safari"==l.SystemInfo.browser&&parseInt(l.SystemInfo.browserVersion)<15&&(l.SystemInfo.mobile||1<navigator.maxTouchPoints?n+="\nUpgrade to iOS 15 or later.":n+="\nUpgrade to Safari 15 or later."),t(n)):l.SystemInfo.hasWasm?(l.startupErrorHandler=t,c(0),l.postRun.push(function(){c(1),delete l.startupErrorHandler,e(C)}),T()):t("Your browser does not support WebAssembly."):t("Your browser does not support WebGL.")})}
 
 
spaces/Duckichan1/Jen_/Dockerfile DELETED
@@ -1 +0,0 @@
1
- 1
 
 
spaces/Dukcar/Pix2Pix-Video/style.css DELETED
@@ -1,101 +0,0 @@
1
- #col-container {max-width: 820px; margin-left: auto; margin-right: auto;}
2
- #duplicate-container{
3
- display: flex;
4
- justify-content: space-between;
5
- align-items: center;
6
- line-height: 1em;
7
- flex-direction: row-reverse;
8
- font-size:1em;
9
- }
10
- a, a:hover, a:visited {
11
- text-decoration-line: underline;
12
- font-weight: 600;
13
- color: #1f2937 !important;
14
- }
15
-
16
- .dark a, .dark a:hover, .dark a:visited {
17
- color: #f3f4f6 !important;
18
- }
19
-
20
- .footer {
21
- margin-bottom: 45px;
22
- margin-top: 10px;
23
- text-align: center;
24
- border-bottom: 1px solid #e5e5e5;
25
- }
26
-
27
- .footer>p {
28
- font-size: .8rem!important;
29
- display: inline-block;
30
- padding: 0 10px;
31
- transform: translateY(26px);
32
- background: white;
33
- }
34
- .dark .footer {
35
- border-color: #303030;
36
- }
37
- .dark .footer>p {
38
- background: #0b0f19;
39
- }
40
-
41
- div#may-like-container > p {
42
- font-size: .8em;
43
- margin-bottom: 4px;
44
- }
45
-
46
- .animate-spin {
47
- animation: spin 1s linear infinite;
48
- }
49
-
50
- @keyframes spin {
51
- from {
52
- transform: rotate(0deg);
53
- }
54
- to {
55
- transform: rotate(360deg);
56
- }
57
- }
58
-
59
- #share-btn-container {
60
- display: flex;
61
- padding-left: 0.5rem !important;
62
- padding-right: 0.5rem !important;
63
- background-color: #000000;
64
- justify-content: center;
65
- align-items: center;
66
- border-radius: 9999px !important;
67
- max-width: 13rem;
68
- }
69
-
70
- #share-btn-container:hover {
71
- background-color: #060606;
72
- }
73
-
74
- #share-btn {
75
- all: initial;
76
- color: #ffffff;
77
- font-weight: 600;
78
- cursor:pointer;
79
- font-family: 'IBM Plex Sans', sans-serif;
80
- margin-left: 0.5rem !important;
81
- padding-top: 0.5rem !important;
82
- padding-bottom: 0.5rem !important;
83
- right:0;
84
- }
85
-
86
- #share-btn * {
87
- all: unset;
88
- }
89
-
90
- #share-btn-container div:nth-child(-n+2){
91
- width: auto !important;
92
- min-height: 0px !important;
93
- }
94
-
95
- #share-btn-container .wrap {
96
- display: none !important;
97
- }
98
-
99
- #share-btn-container.hidden {
100
- display: none!important;
101
- }
 
 
spaces/EPFL-VILAB/MultiMAE/utils/layers/drop.py DELETED
@@ -1,176 +0,0 @@
1
- # --------------------------------------------------------
2
- # Based on timm and MAE-priv code bases
3
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm
4
- # https://github.com/BUPT-PRIV/MAE-priv
5
- # --------------------------------------------------------
6
-
7
- """ DropBlock, DropPath
8
-
9
- PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
10
-
11
- Papers:
12
- DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
13
-
14
- Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
15
-
16
- Code:
17
- DropBlock impl inspired by two Tensorflow impl that I liked:
18
- - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
19
- - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
20
-
21
- Hacked together by / Copyright 2020 Ross Wightman
22
- """
23
- import torch
24
- import torch.nn as nn
25
- import torch.nn.functional as F
26
-
27
-
28
- def drop_block_2d(
29
- x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0,
30
- with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
31
- """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
32
-
33
- DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
34
- runs with success, but needs further validation and possibly optimization for lower runtime impact.
35
- """
36
- B, C, H, W = x.shape
37
- total_size = W * H
38
- clipped_block_size = min(block_size, min(W, H))
39
- # seed_drop_rate, the gamma parameter
40
- gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
41
- (W - block_size + 1) * (H - block_size + 1))
42
-
43
- # Forces the block to be inside the feature map.
44
- w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))
45
- valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
46
- ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
47
- valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
48
-
49
- if batchwise:
50
- # one mask for whole batch, quite a bit faster
51
- uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
52
- else:
53
- uniform_noise = torch.rand_like(x)
54
- block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
55
- block_mask = -F.max_pool2d(
56
- -block_mask,
57
- kernel_size=clipped_block_size, # block_size,
58
- stride=1,
59
- padding=clipped_block_size // 2)
60
-
61
- if with_noise:
62
- normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
63
- if inplace:
64
- x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
65
- else:
66
- x = x * block_mask + normal_noise * (1 - block_mask)
67
- else:
68
- normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
69
- if inplace:
70
- x.mul_(block_mask * normalize_scale)
71
- else:
72
- x = x * block_mask * normalize_scale
73
- return x
74
-
75
-
76
- def drop_block_fast_2d(
77
- x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,
78
- gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):
79
- """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
80
-
81
- DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
82
- block mask at edges.
83
- """
84
- B, C, H, W = x.shape
85
- total_size = W * H
86
- clipped_block_size = min(block_size, min(W, H))
87
- gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
88
- (W - block_size + 1) * (H - block_size + 1))
89
-
90
- if batchwise:
91
- # one mask for whole batch, quite a bit faster
92
- block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma
93
- else:
94
- # mask per batch element
95
- block_mask = torch.rand_like(x) < gamma
96
- block_mask = F.max_pool2d(
97
- block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
98
-
99
- if with_noise:
100
- normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
101
- if inplace:
102
- x.mul_(1. - block_mask).add_(normal_noise * block_mask)
103
- else:
104
- x = x * (1. - block_mask) + normal_noise * block_mask
105
- else:
106
- block_mask = 1 - block_mask
107
- normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)
108
- if inplace:
109
- x.mul_(block_mask * normalize_scale)
110
- else:
111
- x = x * block_mask * normalize_scale
112
- return x
113
-
114
-
115
- class DropBlock2d(nn.Module):
116
- """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
117
- """
118
-
119
- def __init__(self,
120
- drop_prob=0.1,
121
- block_size=7,
122
- gamma_scale=1.0,
123
- with_noise=False,
124
- inplace=False,
125
- batchwise=False,
126
- fast=True):
127
- super(DropBlock2d, self).__init__()
128
- self.drop_prob = drop_prob
129
- self.gamma_scale = gamma_scale
130
- self.block_size = block_size
131
- self.with_noise = with_noise
132
- self.inplace = inplace
133
- self.batchwise = batchwise
134
- self.fast = fast # FIXME finish comparisons of fast vs not
135
-
136
- def forward(self, x):
137
- if not self.training or not self.drop_prob:
138
- return x
139
- if self.fast:
140
- return drop_block_fast_2d(
141
- x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
142
- else:
143
- return drop_block_2d(
144
- x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
145
-
146
-
147
- def drop_path(x, drop_prob: float = 0., training: bool = False):
148
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
149
-
150
- This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
151
- the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
152
- See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
153
- changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
154
- 'survival rate' as the argument.
155
-
156
- """
157
- if drop_prob == 0. or not training:
158
- return x
159
- keep_prob = 1 - drop_prob
160
- shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
161
- random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
162
- random_tensor.floor_() # binarize
163
- output = x.div(keep_prob) * random_tensor
164
- return output
165
-
166
-
167
- class DropPath(nn.Module):
168
- """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
169
- """
170
-
171
- def __init__(self, drop_prob=None):
172
- super(DropPath, self).__init__()
173
- self.drop_prob = drop_prob
174
-
175
- def forward(self, x):
176
- return drop_path(x, self.drop_prob, self.training)
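
A minimal usage sketch of the two layers above (the import path `drop` and the tensor shape are illustrative assumptions, not part of the original file):

import torch
from drop import DropBlock2d, DropPath  # hypothetical module name for the file above

x = torch.randn(2, 64, 32, 32)                  # (B, C, H, W) feature map
blk = DropBlock2d(drop_prob=0.1, block_size=7)
blk.train()                                     # both layers are no-ops in eval mode
y = blk(x)                                      # contiguous regions are zeroed, the rest rescaled

dp = DropPath(drop_prob=0.2)
dp.train()
z = dp(x)                                       # each sample kept with prob 0.8 and scaled by 1/0.8

Both layers return the input unchanged when `training` is False or the drop probability is 0.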
 
 
spaces/Fengbinbin/gpt-academic/crazy_functions/解析JupyterNotebook.py DELETED
@@ -1,145 +0,0 @@
1
- from toolbox import update_ui
2
- from toolbox import CatchException, report_execption, write_results_to_file
3
- fast_debug = True
4
-
5
-
6
- class PaperFileGroup():
7
- def __init__(self):
8
- self.file_paths = []
9
- self.file_contents = []
10
- self.sp_file_contents = []
11
- self.sp_file_index = []
12
- self.sp_file_tag = []
13
-
14
- # count_token
15
- from request_llm.bridge_all import model_info
16
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
17
- def get_token_num(txt): return len(
18
- enc.encode(txt, disallowed_special=()))
19
- self.get_token_num = get_token_num
20
-
21
- def run_file_split(self, max_token_limit=1900):
22
- """
23
- Split long text into shorter segments
24
- """
25
- for index, file_content in enumerate(self.file_contents):
26
- if self.get_token_num(file_content) < max_token_limit:
27
- self.sp_file_contents.append(file_content)
28
- self.sp_file_index.append(index)
29
- self.sp_file_tag.append(self.file_paths[index])
30
- else:
31
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
32
- segments = breakdown_txt_to_satisfy_token_limit_for_pdf(
33
- file_content, self.get_token_num, max_token_limit)
34
- for j, segment in enumerate(segments):
35
- self.sp_file_contents.append(segment)
36
- self.sp_file_index.append(index)
37
- self.sp_file_tag.append(
38
- self.file_paths[index] + f".part-{j}.txt")
39
-
40
-
41
-
42
- def parseNotebook(filename, enable_markdown=1):
43
- import json
44
-
45
- CodeBlocks = []
46
- with open(filename, 'r', encoding='utf-8', errors='replace') as f:
47
- notebook = json.load(f)
48
- for cell in notebook['cells']:
49
- if cell['cell_type'] == 'code' and cell['source']:
50
- # remove blank lines
51
- cell['source'] = [line for line in cell['source'] if line.strip()
52
- != '']
53
- CodeBlocks.append("".join(cell['source']))
54
- elif enable_markdown and cell['cell_type'] == 'markdown' and cell['source']:
55
- cell['source'] = [line for line in cell['source'] if line.strip()
56
- != '']
57
- CodeBlocks.append("Markdown:"+"".join(cell['source']))
58
-
59
- Code = ""
60
- for idx, code in enumerate(CodeBlocks):
61
- Code += f"This is code block {idx+1}: \n"
62
- Code += code+"\n"
63
-
64
- return Code
65
-
66
-
67
- def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
68
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
69
-
70
- enable_markdown = plugin_kwargs.get("advanced_arg", "1")
71
- try:
72
- enable_markdown = int(enable_markdown)
73
- except ValueError:
74
- enable_markdown = 1
75
-
76
- pfg = PaperFileGroup()
77
-
78
- for fp in file_manifest:
79
- file_content = parseNotebook(fp, enable_markdown=enable_markdown)
80
- pfg.file_paths.append(fp)
81
- pfg.file_contents.append(file_content)
82
-
83
- # <-------- Split overly long ipynb files ---------->
84
- pfg.run_file_split(max_token_limit=1024)
85
- n_split = len(pfg.sp_file_contents)
86
-
87
- inputs_array = [r"This is a Jupyter Notebook file, tell me about Each Block in Chinese. Focus Just On Code." +
88
- r"If a block starts with `Markdown`, it is a markdown block in the ipynb. " +
89
- r"Start a new line for a block and block num use Chinese." +
90
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
91
- inputs_show_user_array = [f"{f}的分析如下" for f in pfg.sp_file_tag]
92
- sys_prompt_array = ["You are a professional programmer."] * n_split
93
-
94
- gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
95
- inputs_array=inputs_array,
96
- inputs_show_user_array=inputs_show_user_array,
97
- llm_kwargs=llm_kwargs,
98
- chatbot=chatbot,
99
- history_array=[[""] for _ in range(n_split)],
100
- sys_prompt_array=sys_prompt_array,
101
- # max_workers=5, # the maximum parallelism allowed by OpenAI
102
- scroller_max_len=80
103
- )
104
-
105
- # <-------- Collect the results and finish ---------->
106
- block_result = " \n".join(gpt_response_collection)
107
- chatbot.append(("解析的结果如下", block_result))
108
- history.extend(["解析的结果如下", block_result])
109
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
110
-
111
- # <-------- Write the results to a file and finish ---------->
112
- res = write_results_to_file(history)
113
- chatbot.append(("完成了吗?", res))
114
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
115
-
116
- @CatchException
117
- def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
118
- chatbot.append([
119
- "函数插件功能?",
120
- "对IPynb文件进行解析。Contributor: codycjy."])
121
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
122
-
123
- history = [] # clear the history
124
- import glob
125
- import os
126
- if os.path.exists(txt):
127
- project_folder = txt
128
- else:
129
- if txt == "":
130
- txt = '空空如也的输入栏'
131
- report_execption(chatbot, history,
132
- a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
133
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
134
- return
135
- if txt.endswith('.ipynb'):
136
- file_manifest = [txt]
137
- else:
138
- file_manifest = [f for f in glob.glob(
139
- f'{project_folder}/**/*.ipynb', recursive=True)]
140
- if len(file_manifest) == 0:
141
- report_execption(chatbot, history,
142
- a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
143
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
144
- return
145
- yield from ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, )
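
A small worked example of the `parseNotebook` helper above (the notebook contents and file name are illustrative):

import json

nb = {"cells": [
    {"cell_type": "markdown", "source": ["# Title\n"]},
    {"cell_type": "code", "source": ["x = 1\n", "print(x)\n"]},
]}
with open("demo.ipynb", "w", encoding="utf-8") as f:
    json.dump(nb, f)

print(parseNotebook("demo.ipynb", enable_markdown=1))
# prints roughly:
#   This is code block 1:
#   Markdown:# Title
#   This is code block 2:
#   x = 1
#   print(x)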
 
 
spaces/Frantz103/CaptionQuest/app.py DELETED
@@ -1,230 +0,0 @@
1
- from transformers import pipeline
2
- from PIL import Image
3
- import numpy as np
4
- import matplotlib.pyplot as plt
5
-
6
- from transformers import AutoProcessor, AutoModelForCausalLM, pipeline
7
- import torch
8
- from torchvision import transforms
9
-
10
-
11
- from sklearn.feature_extraction.text import CountVectorizer
12
- from sklearn.decomposition import LatentDirichletAllocation
13
- from sklearn.feature_extraction.text import TfidfVectorizer
14
- from sklearn.metrics.pairwise import cosine_similarity
15
- import textstat
16
- import spacy
17
-
18
- import re
19
-
20
- # Initialize the processor and model for the large COCO model
21
- processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
22
- model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
23
-
24
- detection_pipe = pipeline("object-detection", model="facebook/detr-resnet-50")
25
- classification_pipe = pipeline("zero-shot-image-classification", model="openai/clip-vit-large-patch14")
26
-
27
- # Initialize the pipeline for the VIT model
28
- vit_pipeline = pipeline(task="image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
29
-
30
- # Move the COCO model to the device
31
- device = "cuda" if torch.cuda.is_available() else "cpu"
32
- model.to(device)
33
-
34
- def generate_text_and_caption(image):
35
- # Define the preprocessing pipeline for the image
36
- preprocess = transforms.Compose([
37
- transforms.Resize((256, 256)), # Resize to 256x256, change this to match the required dimensions
38
- transforms.CenterCrop(224), # Center crop to 224x224, change this to match the required dimensions
39
- transforms.ToTensor(),
40
- transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), # Normalize with ImageNet mean and std
41
- ])
42
-
43
- # Apply the preprocessing pipeline to the image
44
- preprocessed_image = preprocess(image).unsqueeze(0).to(device) # unsqueeze to add batch dimension
45
-
46
- # For large COCO model
47
- generated_ids = model.generate(pixel_values=preprocessed_image, max_length=20)
48
- caption1 = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
49
-
50
- # For VIT model
51
- vit_output = vit_pipeline(image)
52
- caption2_info = vit_output[0] if vit_output else {"generated_text": "N/A"}
53
- caption2 = caption2_info.get('generated_text', 'N/A')
54
-
55
- return caption1, caption2
56
-
57
- def get_unique_refined_labels(image):
58
- original_output = detection_pipe(image)
59
- filtered_output = [item for item in original_output if item['score'] >= 0.95]
60
- unique_refined_labels = {}
61
- for item in filtered_output:
62
- box = item['box']
63
- label = item['label']
64
- xmin, ymin, xmax, ymax = box['xmin'], box['ymin'], box['xmax'], box['ymax']
65
- cropped_image = image.crop((xmin, ymin, xmax, ymax))
66
- predictions = classification_pipe(cropped_image, candidate_labels=[label])
67
- if predictions:
68
- top_prediction = sorted(predictions, key=lambda x: x['score'], reverse=True)[0]
69
- top_label = top_prediction['label']
70
- top_score = top_prediction['score']
71
- if top_label not in unique_refined_labels or unique_refined_labels[top_label] < top_score:
72
- unique_refined_labels[top_label] = top_score
73
- return unique_refined_labels, original_output, filtered_output
74
-
75
-
76
- # Load NLP model for entity extraction
77
- nlp = spacy.load("en_core_web_sm")
78
-
79
-
80
- def extract_main_words(text):
81
- doc = nlp(text)
82
- main_words = [token.lemma_ for token in doc if token.pos_ == 'NOUN']
83
- return main_words
84
-
85
- def get_topics(text):
86
- # Vectorize the text
87
- vectorizer = CountVectorizer()
88
- text_vec = vectorizer.fit_transform([text])
89
- # Fit LDA model to get topics
90
- lda = LatentDirichletAllocation(n_components=1, random_state=0)
91
- lda.fit(text_vec)
92
- # Get the top words per topic (assuming one topic for simplicity)
93
- feature_names = vectorizer.get_feature_names_out()
94
- top_words = [feature_names[i] for i in lda.components_[0].argsort()[:-10 - 1:-1]]
95
- return top_words
96
-
97
- def check_readability(caption):
98
- # Compute the Flesch Reading Ease score of the caption
99
- reading_ease_score = textstat.flesch_reading_ease(caption)
100
- return reading_ease_score
101
-
102
- def compute_similarity(caption1, caption2):
103
- vectorizer = TfidfVectorizer().fit_transform([caption1, caption2])
104
- vectors = vectorizer.toarray()
105
- cosine_sim = cosine_similarity(vectors)
106
- # The similarity between the captions is the off-diagonal value of the cosine_sim matrix
107
- similarity_score = cosine_sim[0, 1]
108
- return similarity_score
109
-
110
- def evaluate_caption(image, caption1, caption2, unique_refined_labels):
111
- # Scores initialization
112
- score_caption1 = 0
113
- score_caption2 = 0
114
-
115
- # Initialize object presence scores
116
- object_presence_score1 = 0
117
- object_presence_score2 = 0
118
-
119
- # Assume you have a function to extract main words
120
- main_words_caption1 = extract_main_words(caption1)
121
- main_words_caption2 = extract_main_words(caption2)
122
-
123
- # Check for object presence using unique_refined_labels
124
- object_presence_score1 += sum([1 for word in main_words_caption1 if word in unique_refined_labels.keys()])
125
- object_presence_score2 += sum([1 for word in main_words_caption2 if word in unique_refined_labels.keys()])
126
-
127
- # Entity Extraction
128
- entities_caption1 = [ent.text for ent in nlp(caption1).ents]
129
- entities_caption2 = [ent.text for ent in nlp(caption2).ents]
130
-
131
- # Check for object presence using unique_refined_labels
132
- score_caption1 += sum([1 for entity in entities_caption1 if entity in unique_refined_labels.keys()])
133
- score_caption2 += sum([1 for entity in entities_caption2 if entity in unique_refined_labels.keys()])
134
-
135
- # Topic Modeling
136
- topics_caption1 = get_topics(caption1)
137
- topics_caption2 = get_topics(caption2)
138
-
139
- # Check for topic relevance using unique_refined_labels
140
- score_caption1 += sum([1 for topic in topics_caption1 if topic in unique_refined_labels.keys()])
141
- score_caption2 += sum([1 for topic in topics_caption2 if topic in unique_refined_labels.keys()])
142
-
143
-
144
- # Implement custom rules
145
- def custom_rules(caption):
146
- score = 0
147
-
148
- # Rule for starting with a capital letter
149
- if not caption[0].isupper():
150
- score -= 1
151
-
152
- # Rule for ending with punctuation
153
- if caption[-1] not in ['.', '!', '?']:
154
- score -= 1
155
-
156
- return score
157
-
158
- # Custom rule scores
159
- custom_score1 = custom_rules(caption1)
160
- custom_score2 = custom_rules(caption2)
161
-
162
- # Update scores based on custom rules
163
- score_caption1 += custom_score1 # custom_rules returns penalties as negative values
164
- score_caption2 += custom_score2
165
-
166
-
167
- # Check length
168
- length_caption1 = len(caption1.split())
169
- length_caption2 = len(caption2.split())
170
-
171
- if length_caption1 < 3: # assuming a reasonable caption should have at least 3 words
172
- score_caption1 -= 3 # arbitrary penalty
173
- if length_caption2 < 3:
174
- score_caption2 -= 3 # arbitrary penalty
175
-
176
- #Define similarity threshold
177
- similarity_score = compute_similarity(caption1, caption2)
178
-
179
- similarity_threshold = 0.9 # Replace this with whatever you consider "close enough"
180
-
181
- score_difference = abs(score_caption1 - score_caption2)
182
- score_threshold = 2 # Replace this with whatever you consider "close enough"
183
-
184
- if score_difference <= score_threshold:
185
- if similarity_score > similarity_threshold:
186
- readability_score_caption1 = check_readability(caption1)
187
- readability_score_caption2 = check_readability(caption2)
188
-
189
- return caption1 if readability_score_caption1 > readability_score_caption2 else caption2
190
- else:
191
- return caption1 if score_caption1 > score_caption2 else caption2
192
-
193
- # Fallback return statement
194
- return caption2 if score_caption2 > score_caption1 else caption1
195
-
196
- # Define the post_process_caption function
197
- def post_process_caption(caption):
198
- # Remove [unusedX] tokens, where X is any number
199
- cleaned_caption = re.sub(r'\[\s*unused\d+\s*\](, )? ?', '', caption)
200
- return cleaned_caption
201
-
202
- def process_image(image_path):
203
- image = Image.open(image_path).convert("RGB")
204
- caption1, caption2 = generate_text_and_caption(image)
205
- unique_refined_labels, _, _ = get_unique_refined_labels(image)
206
-
207
- # Update return values for caption1
208
- caption1 = post_process_caption(caption1)
209
-
210
- # evaluate the captions
211
- better_caption = evaluate_caption(image, caption1, caption2, unique_refined_labels)
212
-
213
- return caption1, caption2, better_caption
214
-
215
- import gradio as gr
216
-
217
- img_cap_ui = gr.Interface(
218
- fn=process_image,
219
- title="Image Captioning with Automatic Evaluation",
220
- description="Caution: this is a research experiment for personal use, please review the captions before using.",
221
- inputs=gr.inputs.Image(type="filepath",label="Add your image"),
222
- outputs=[gr.Textbox(label="Caption from the git-coco model", show_copy_button=True),
223
- gr.Textbox(label="Caption from the nlp-connect model", show_copy_button=True),
224
- gr.Textbox(label="Suggested caption after automatic evaluation", show_copy_button=True)],
225
- examples=["image_31.jpg","image_41.jpg","image_48.jpg", "image_50.jpg"],
226
- article="The caption evaluation method use a simple voting scheme from outputs of 2 additional models. This is an experiment, please make edit if you use the generated caption.",
227
- theme=gr.themes.Soft()
228
- )
229
-
230
- img_cap_ui.launch()
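
A quick sketch of the scoring helpers above in isolation (the captions are illustrative; the functions are the ones defined in this file):

c1 = "A dog runs on the beach."
c2 = "A dog is running on a beach."
sim = compute_similarity(c1, c2)     # TF-IDF cosine similarity in [0, 1]
readable = check_readability(c1)     # Flesch Reading Ease score
print(sim, readable)
# evaluate_caption prefers the more readable caption only when the voting scores
# are within score_threshold of each other and sim exceeds similarity_threshold
# (0.9); otherwise the higher object/entity/topic voting score wins.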
 
 
spaces/GT4SD/protein_properties/app.py DELETED
@@ -1,83 +0,0 @@
1
- import logging
2
- import pathlib
3
-
4
- import gradio as gr
5
- import numpy as np
6
- import pandas as pd
7
- from gt4sd.properties.proteins import PROTEIN_PROPERTY_PREDICTOR_FACTORY
8
-
9
- from utils import draw_grid_predict
10
-
11
- logger = logging.getLogger(__name__)
12
- logger.addHandler(logging.NullHandler())
13
-
14
-
15
- AMIDE_FNS = ["protein_weight", "charge", "charge_density", "isoelectric_point"]
16
- PH_FNS = ["charge", "charge_density", "isoelectric_point"]
17
-
18
-
19
- def main(property: str, seq: str, seq_file: str, amide: bool, ph: float):
20
- prop_name = property.lower()
21
- algo, config = PROTEIN_PROPERTY_PREDICTOR_FACTORY[prop_name]
22
-
23
- # Pass hyperparameters if applicable
24
- kwargs = {}
25
- if prop_name in AMIDE_FNS:
26
- kwargs["amide"] = amide
27
- if prop_name in PH_FNS:
28
- kwargs["ph"] = ph
29
- model = algo(config(**kwargs))
30
-
31
- # Read and parse data
32
- if seq != "" and seq_file is not None:
33
- raise ValueError("Pass either seq or seq_file, not both.")
34
- elif seq != "":
35
- seqs = [seq]
36
- elif seq_file is not None:
37
- seqs = pd.read_csv(seq_file.name, header=None, sep="\t")[0].tolist()
38
- props = np.array(list(map(model, seqs))).round(2)
39
-
40
- # Expand to 2D array if needed
41
- if len(props.shape) == 1:
42
- props = np.expand_dims(np.array(props), -1)
43
-
44
- return draw_grid_predict(seqs, props, property_names=[property], domain="Proteins")
45
-
46
-
47
- if __name__ == "__main__":
48
- # Preparation (retrieve all available algorithms)
49
- properties = list(PROTEIN_PROPERTY_PREDICTOR_FACTORY.keys())[::-1]
50
- properties = list(map(lambda x: x.capitalize(), properties))
51
-
52
- # Load metadata
53
- metadata_root = pathlib.Path(__file__).parent.joinpath("model_cards")
54
-
55
- examples = [
56
- ["Aliphaticity", "", metadata_root.joinpath("examples.smi"), False, 7],
57
- ["Isoelectric_point", "KFLIYQMECSTMIFGL", None, False, 7],
58
- ["Charge", "KFLIYQMECSTMIFGL", None, True, 12],
59
- ]
60
-
61
- with open(metadata_root.joinpath("article.md"), "r") as f:
62
- article = f.read()
63
- with open(metadata_root.joinpath("description.md"), "r") as f:
64
- description = f.read()
65
-
66
- demo = gr.Interface(
67
- fn=main,
68
- title="Protein properties",
69
- inputs=[
70
- gr.Dropdown(properties, label="Property", value="Instability"),
71
- gr.Textbox(
72
- label="Single Protein sequence", placeholder="KFLIYQMECSTMIFGL", lines=1
73
- ),
74
- gr.File(file_types=[".smi"], label="One AAS per line"),
75
- gr.Radio(choices=[True, False], label="Amide", value=True),
76
- gr.Slider(minimum=0, maximum=14, value=7, label="pH"),
77
- ],
78
- outputs=gr.HTML(label="Output"),
79
- article=article,
80
- description=description,
81
- examples=examples,
82
- )
83
- demo.launch(debug=True, show_error=True)
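
A condensed sketch of the hyperparameter plumbing in `main` above, using the same factory pattern (the property name and values are illustrative):

from gt4sd.properties.proteins import PROTEIN_PROPERTY_PREDICTOR_FACTORY

algo, config = PROTEIN_PROPERTY_PREDICTOR_FACTORY["charge"]
model = algo(config(amide=True, ph=12.0))  # "charge" appears in both AMIDE_FNS and PH_FNS
print(model("KFLIYQMECSTMIFGL"))           # a single scalar property value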
 
 
spaces/GaenKoki/voicevox/test/test_preset.py DELETED
@@ -1,303 +0,0 @@
1
- from os import remove
2
- from pathlib import Path
3
- from shutil import copyfile
4
- from tempfile import TemporaryDirectory
5
- from unittest import TestCase
6
-
7
- from voicevox_engine.preset import Preset, PresetError, PresetManager
8
-
9
-
10
- class TestPresetManager(TestCase):
11
- def setUp(self):
12
- self.tmp_dir = TemporaryDirectory()
13
- self.tmp_dir_path = Path(self.tmp_dir.name)
14
-
15
- def tearDown(self):
16
- self.tmp_dir.cleanup()
17
-
18
- def test_validation(self):
19
- preset_manager = PresetManager(preset_path=Path("test/presets-test-1.yaml"))
20
- presets = preset_manager.load_presets()
21
- self.assertFalse(presets is None)
22
-
23
- def test_validation_same(self):
24
- preset_manager = PresetManager(preset_path=Path("test/presets-test-1.yaml"))
25
- presets = preset_manager.load_presets()
26
- presets2 = preset_manager.load_presets()
27
- self.assertFalse(presets is None)
28
- self.assertEqual(presets, presets2)
29
-
30
- def test_validation_2(self):
31
- preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml"))
32
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"):
33
- preset_manager.load_presets()
34
-
35
- def test_preset_id(self):
36
- preset_manager = PresetManager(preset_path=Path("test/presets-test-3.yaml"))
37
- with self.assertRaises(PresetError, msg="プリセットのidに重複があります"):
38
- preset_manager.load_presets()
39
-
40
- def test_empty_file(self):
41
- preset_manager = PresetManager(preset_path=Path("test/presets-test-4.yaml"))
42
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルが空の内容です"):
43
- preset_manager.load_presets()
44
-
45
- def test_not_exist_file(self):
46
- preset_manager = PresetManager(preset_path=Path("test/presets-dummy.yaml"))
47
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルが見つかりません"):
48
- preset_manager.load_presets()
49
-
50
- def test_add_preset(self):
51
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
52
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
53
- preset_manager = PresetManager(preset_path=temp_path)
54
- preset = Preset(
55
- **{
56
- "id": 10,
57
- "name": "test10",
58
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
59
- "style_id": 2,
60
- "speedScale": 1,
61
- "pitchScale": 1,
62
- "intonationScale": 0.5,
63
- "volumeScale": 1,
64
- "prePhonemeLength": 0.1,
65
- "postPhonemeLength": 0.1,
66
- }
67
- )
68
- id = preset_manager.add_preset(preset)
69
- self.assertEqual(id, 10)
70
- self.assertEqual(len(preset_manager.presets), 3)
71
- for _preset in preset_manager.presets:
72
- if _preset.id == id:
73
- self.assertEqual(_preset, preset)
74
- remove(temp_path)
75
-
76
- def test_add_preset_load_failure(self):
77
- preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml"))
78
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"):
79
- preset_manager.add_preset(
80
- Preset(
81
- **{
82
- "id": 1,
83
- "name": "",
84
- "speaker_uuid": "",
85
- "style_id": 0,
86
- "speedScale": 0,
87
- "pitchScale": 0,
88
- "intonationScale": 0,
89
- "volumeScale": 0,
90
- "prePhonemeLength": 0,
91
- "postPhonemeLength": 0,
92
- }
93
- )
94
- )
95
-
96
- def test_add_preset_conflict_id(self):
97
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
98
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
99
- preset_manager = PresetManager(preset_path=temp_path)
100
- preset = Preset(
101
- **{
102
- "id": 2,
103
- "name": "test3",
104
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
105
- "style_id": 2,
106
- "speedScale": 1,
107
- "pitchScale": 1,
108
- "intonationScale": 0.5,
109
- "volumeScale": 1,
110
- "prePhonemeLength": 0.1,
111
- "postPhonemeLength": 0.1,
112
- }
113
- )
114
- id = preset_manager.add_preset(preset)
115
- self.assertEqual(id, 3)
116
- self.assertEqual(len(preset_manager.presets), 3)
117
- for _preset in preset_manager.presets:
118
- if _preset.id == id:
119
- self.assertEqual(_preset, preset)
120
- remove(temp_path)
121
-
122
- def test_add_preset_conflict_id2(self):
123
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
124
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
125
- preset_manager = PresetManager(preset_path=temp_path)
126
- preset = Preset(
127
- **{
128
- "id": -1,
129
- "name": "test3",
130
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
131
- "style_id": 2,
132
- "speedScale": 1,
133
- "pitchScale": 1,
134
- "intonationScale": 0.5,
135
- "volumeScale": 1,
136
- "prePhonemeLength": 0.1,
137
- "postPhonemeLength": 0.1,
138
- }
139
- )
140
- id = preset_manager.add_preset(preset)
141
- self.assertEqual(id, 3)
142
- self.assertEqual(len(preset_manager.presets), 3)
143
- for _preset in preset_manager.presets:
144
- if _preset.id == id:
145
- self.assertEqual(_preset, preset)
146
- remove(temp_path)
147
-
148
- def test_add_preset_write_failure(self):
149
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
150
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
151
- preset_manager = PresetManager(preset_path=temp_path)
152
- preset = Preset(
153
- **{
154
- "id": 10,
155
- "name": "test10",
156
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
157
- "style_id": 2,
158
- "speedScale": 1,
159
- "pitchScale": 1,
160
- "intonationScale": 0.5,
161
- "volumeScale": 1,
162
- "prePhonemeLength": 0.1,
163
- "postPhonemeLength": 0.1,
164
- }
165
- )
166
- preset_manager.load_presets()
167
- preset_manager.load_presets = lambda: []
168
- preset_manager.preset_path = ""
169
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルに書き込み失敗しました"):
170
- preset_manager.add_preset(preset)
171
- self.assertEqual(len(preset_manager.presets), 2)
172
- remove(temp_path)
173
-
174
- def test_update_preset(self):
175
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
176
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
177
- preset_manager = PresetManager(preset_path=temp_path)
178
- preset = Preset(
179
- **{
180
- "id": 1,
181
- "name": "test1 new",
182
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
183
- "style_id": 2,
184
- "speedScale": 1,
185
- "pitchScale": 1,
186
- "intonationScale": 0.5,
187
- "volumeScale": 1,
188
- "prePhonemeLength": 0.1,
189
- "postPhonemeLength": 0.1,
190
- }
191
- )
192
- id = preset_manager.update_preset(preset)
193
- self.assertEqual(id, 1)
194
- self.assertEqual(len(preset_manager.presets), 2)
195
- for _preset in preset_manager.presets:
196
- if _preset.id == id:
197
- self.assertEqual(_preset, preset)
198
- remove(temp_path)
199
-
200
- def test_update_preset_load_failure(self):
201
- preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml"))
202
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"):
203
- preset_manager.update_preset(
204
- Preset(
205
- **{
206
- "id": 1,
207
- "name": "",
208
- "speaker_uuid": "",
209
- "style_id": 0,
210
- "speedScale": 0,
211
- "pitchScale": 0,
212
- "intonationScale": 0,
213
- "volumeScale": 0,
214
- "prePhonemeLength": 0,
215
- "postPhonemeLength": 0,
216
- }
217
- )
218
- )
219
-
220
- def test_update_preset_not_found(self):
221
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
222
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
223
- preset_manager = PresetManager(preset_path=temp_path)
224
- preset = Preset(
225
- **{
226
- "id": 10,
227
- "name": "test1 new",
228
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
229
- "style_id": 2,
230
- "speedScale": 1,
231
- "pitchScale": 1,
232
- "intonationScale": 0.5,
233
- "volumeScale": 1,
234
- "prePhonemeLength": 0.1,
235
- "postPhonemeLength": 0.1,
236
- }
237
- )
238
- with self.assertRaises(PresetError, msg="更新先のプリセットが存在しません"):
239
- preset_manager.update_preset(preset)
240
- self.assertEqual(len(preset_manager.presets), 2)
241
- remove(temp_path)
242
-
243
- def test_update_preset_write_failure(self):
244
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
245
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
246
- preset_manager = PresetManager(preset_path=temp_path)
247
- preset = Preset(
248
- **{
249
- "id": 1,
250
- "name": "test1 new",
251
- "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
252
- "style_id": 2,
253
- "speedScale": 1,
254
- "pitchScale": 1,
255
- "intonationScale": 0.5,
256
- "volumeScale": 1,
257
- "prePhonemeLength": 0.1,
258
- "postPhonemeLength": 0.1,
259
- }
260
- )
261
- preset_manager.load_presets()
262
- preset_manager.load_presets = lambda: []
263
- preset_manager.preset_path = ""
264
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルに書き込み失敗しました"):
265
- preset_manager.update_preset(preset)
266
- self.assertEqual(len(preset_manager.presets), 2)
267
- self.assertEqual(preset_manager.presets[0].name, "test")
268
- remove(temp_path)
269
-
270
- def test_delete_preset(self):
271
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
272
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
273
- preset_manager = PresetManager(preset_path=temp_path)
274
- id = preset_manager.delete_preset(1)
275
- self.assertEqual(id, 1)
276
- self.assertEqual(len(preset_manager.presets), 1)
277
- remove(temp_path)
278
-
279
- def test_delete_preset_load_failure(self):
280
- preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml"))
281
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"):
282
- preset_manager.delete_preset(10)
283
-
284
- def test_delete_preset_not_found(self):
285
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
286
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
287
- preset_manager = PresetManager(preset_path=temp_path)
288
- with self.assertRaises(PresetError, msg="削除対象のプリセットが存在しません"):
289
- preset_manager.delete_preset(10)
290
- self.assertEqual(len(preset_manager.presets), 2)
291
- remove(temp_path)
292
-
293
- def test_delete_preset_write_failure(self):
294
- temp_path = self.tmp_dir_path / "presets-test-temp.yaml"
295
- copyfile(Path("test/presets-test-1.yaml"), temp_path)
296
- preset_manager = PresetManager(preset_path=temp_path)
297
- preset_manager.load_presets()
298
- preset_manager.load_presets = lambda: []
299
- preset_manager.preset_path = ""
300
- with self.assertRaises(PresetError, msg="プリセットの設定ファイルに書き込み失敗しました"):
301
- preset_manager.delete_preset(1)
302
- self.assertEqual(len(preset_manager.presets), 2)
303
- remove(temp_path)
 
 
spaces/Gen-Sim/Gen-Sim/cliport/models/clip_ling.py DELETED
@@ -1,97 +0,0 @@
1
- import torch.nn as nn
2
- import torch.nn.functional as F
3
-
4
- import cliport.utils.utils as utils
5
- from cliport.models.resnet import IdentityBlock, ConvBlock
6
- from cliport.models.core.unet import Up
7
- from cliport.models.core import fusion
8
- from cliport.models.clip_lingunet_lat import CLIPLingUNetLat
9
-
10
-
11
- class CLIPLing(CLIPLingUNetLat):
12
- """ CLIP RN50 with U-Net skip connections """
13
-
14
- def __init__(self, input_shape, output_dim, cfg, device, preprocess):
15
- super().__init__(input_shape, output_dim, cfg, device, preprocess)
16
-
17
- # def _build_decoder(self):
18
- # # language
19
- # self.lang_fuser1 = fusion.names[self.lang_fusion_type](input_dim=self.input_dim // 2)
20
- # self.lang_fuser2 = fusion.names[self.lang_fusion_type](input_dim=self.input_dim // 4)
21
- # self.lang_fuser3 = fusion.names[self.lang_fusion_type](input_dim=self.input_dim // 8)
22
-
23
- # self.proj_input_dim = 512 if 'word' in self.lang_fusion_type else 1024
24
- # self.lang_proj1 = nn.Linear(self.proj_input_dim, 1024)
25
- # self.lang_proj2 = nn.Linear(self.proj_input_dim, 512)
26
- # self.lang_proj3 = nn.Linear(self.proj_input_dim, 256)
27
-
28
- # # vision
29
- # self.conv1 = nn.Sequential(
30
- # nn.Conv2d(self.input_dim, 1024, kernel_size=3, stride=1, padding=1, bias=False),
31
- # nn.ReLU(True)
32
- # )
33
-
34
- # self.up1 = Up(2048, 1024 // self.up_factor, self.bilinear)
35
-
36
- # self.up2 = Up(1024, 512 // self.up_factor, self.bilinear)
37
-
38
- # self.up3 = Up(512, 256 // self.up_factor, self.bilinear)
39
-
40
- # self.layer1 = nn.Sequential(
41
- # ConvBlock(128, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
42
- # IdentityBlock(64, [64, 64, 64], kernel_size=3, stride=1, batchnorm=self.batchnorm),
43
- # nn.UpsamplingBilinear2d(scale_factor=2),
44
- # )
45
-
46
- # self.layer2 = nn.Sequential(
47
- # ConvBlock(64, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm),
48
- # IdentityBlock(32, [32, 32, 32], kernel_size=3, stride=1, batchnorm=self.batchnorm),
49
- # nn.UpsamplingBilinear2d(scale_factor=2),
50
- # )
51
-
52
- # self.layer3 = nn.Sequential(
53
- # ConvBlock(32, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm),
54
- # IdentityBlock(16, [16, 16, 16], kernel_size=3, stride=1, batchnorm=self.batchnorm),
55
- # nn.UpsamplingBilinear2d(scale_factor=2),
56
- # )
57
-
58
- del self.lang_fuser2, self.lang_fuser1, self.lang_proj1, self.lang_proj2, self.layer2, self.layer1, self.layer3
59
-
60
- self.conv2 = nn.Sequential(
61
- nn.Conv2d(128, self.output_dim, kernel_size=1)
62
- )
63
-
64
-
65
-
66
-
67
- def forward(self, x, lat, l):
68
- x = self.preprocess(x, dist='clip')
69
-
70
- in_type = x.dtype
71
- in_shape = x.shape
72
- x = x[:,:3] # select RGB
73
- x, im = self.encode_image(x)
74
- x = x.to(in_type)
75
-
76
- l_enc, l_emb, l_mask = self.encode_text(l)
77
- l_input = l_emb if 'word' in self.lang_fusion_type else l_enc
78
- l_input = l_input.to(dtype=x.dtype)
79
-
80
- assert x.shape[1] == self.input_dim
81
- x = self.conv1(x)
82
-
83
- # x = self.lang_fuser1(x, l_input, x2_mask=l_mask, x2_proj=self.lang_proj1)
84
- # x = self.up1(x, im[-2])
85
- # x = self.lat_fusion1(x, lat[-6])
86
-
87
- # x = self.lang_fuser2(x, l_input, x2_mask=l_mask, x2_proj=self.lang_proj2)
88
- # x = self.up2(x, im[-3])
89
- # x = self.lat_fusion2(x, lat[-5])
90
-
91
- x = self.lang_fuser3(x, l_input, x2_mask=l_mask, x2_proj=self.lang_proj3)
92
- x = self.up3(x, im[-4])
93
- x = self.lat_fusion3(x, lat[1])
94
- x = self.conv2(x)
95
-
96
- x = F.interpolate(x, size=(in_shape[-2], in_shape[-1]), mode='bilinear')
97
- return x
 
 
spaces/Gradio-Blocks/uniformer_image_detection/configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py DELETED
@@ -1,39 +0,0 @@
1
- _base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://msra/hrnetv2_w32',
4
- backbone=dict(
5
- _delete_=True,
6
- type='HRNet',
7
- extra=dict(
8
- stage1=dict(
9
- num_modules=1,
10
- num_branches=1,
11
- block='BOTTLENECK',
12
- num_blocks=(4, ),
13
- num_channels=(64, )),
14
- stage2=dict(
15
- num_modules=1,
16
- num_branches=2,
17
- block='BASIC',
18
- num_blocks=(4, 4),
19
- num_channels=(32, 64)),
20
- stage3=dict(
21
- num_modules=4,
22
- num_branches=3,
23
- block='BASIC',
24
- num_blocks=(4, 4, 4),
25
- num_channels=(32, 64, 128)),
26
- stage4=dict(
27
- num_modules=3,
28
- num_branches=4,
29
- block='BASIC',
30
- num_blocks=(4, 4, 4, 4),
31
- num_channels=(32, 64, 128, 256)))),
32
- neck=dict(
33
- _delete_=True,
34
- type='HRFPN',
35
- in_channels=[32, 64, 128, 256],
36
- out_channels=256))
37
- # learning policy
38
- lr_config = dict(step=[16, 19])
39
- runner = dict(type='EpochBasedRunner', max_epochs=20)
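
A minimal sketch of consuming this config with mmdetection 2.x (the file path and API calls are assumptions based on the usual mmdet workflow, not part of this commit):

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/hrnet/cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py')
model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))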