parquet-converter committed
Commit 4bad19b · 1 Parent(s): 7cce725

Update parquet files (step 107 of 121)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/testing/quora_test_2.py +0 -12
  2. spaces/17TheWord/RealESRGAN/realesrgan/data/__init__.py +0 -10
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us MOD APK The Best Way to Play the Game with All Features Unlocked.md +0 -112
  4. spaces/1phancelerku/anime-remove-background/Blow Up - Rich Mavoko Ft. Fid Q MP3 Download and Rating.md +0 -94
  5. spaces/1phancelerku/anime-remove-background/Download Stumble Guys Lite APK and Join the Ultimate Party Knockout Game.md +0 -107
  6. spaces/1phancelerku/anime-remove-background/Download and play Honkai Star Rail on your Chromebook for free.md +0 -90
  7. spaces/AB-TW/team-ai/agents/tools/shell_tool.py +0 -54
  8. spaces/AIWaves/SOP_Generation-single/LLM/base_LLM.py +0 -137
  9. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/DrawBounds.js +0 -90
  10. spaces/Alpaca233/SadTalker/src/face3d/options/train_options.py +0 -53
  11. spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_base/run.sh +0 -10
  12. spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/__init__.py +0 -7
  13. spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py +0 -2
  14. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py +0 -7
  15. spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py +0 -2
  16. spaces/Ankush05/Newcode/app.py +0 -74
  17. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/scripts/ui.sh +0 -14
  18. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py +0 -92
  19. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py +0 -56
  20. spaces/Anonymous-sub/Rerender/src/controller.py +0 -136
  21. spaces/Anthony7906/MengHuiMXD_GPT/run_macOS.command +0 -31
  22. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/bin/Activate.ps1 +0 -247
  23. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/progress_bars.py +0 -68
  24. spaces/AutoLLM/AutoAgents/autoagents/models/custom.py +0 -33
  25. spaces/Avatarize/ECON/README.md +0 -12
  26. spaces/Banbri/zcvzcv/src/app/engine/render.ts +0 -400
  27. spaces/Bart92/RVC_HF/tools/infer_cli.py +0 -67
  28. spaces/Bavesh/Oral_Cancer_Detection/app.py +0 -209
  29. spaces/Bazedgul/YoutubeVideo-Transcript-Summarization/README.md +0 -13
  30. spaces/Benson/text-generation/Examples/Apk Para El Whatsapp Del Gb.md +0 -49
  31. spaces/Benson/text-generation/Examples/Descargar Capitn Tsubasa Sueo Equipo Apk Datos.md +0 -53
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py +0 -296
  33. spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/simple_tokenizer.py +0 -132
  34. spaces/CVH-vn1210/make_hair/minigpt4/common/gradcam.py +0 -24
  35. spaces/CVPR/LIVE/pybind11/tests/pybind11_tests.h +0 -65
  36. spaces/CVPR/WALT/mmdet/models/losses/gaussian_focal_loss.py +0 -91
  37. spaces/CVPR/drawings-to-human/frontend/src/lib/store.ts +0 -17
  38. spaces/CatNika/New_Cat_Proxy/greeting.md +0 -14
  39. spaces/ClearLove443/Robby-chatbot/pages/3_🎬 Robby-Youtube.py +0 -71
  40. spaces/CoPoBio/skin_cancer_risk_prediction/simple_vae.py +0 -124
  41. spaces/CofAI/chat/client/css/options.css +0 -10
  42. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/transforms/transforms.py +0 -468
  43. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/processors/blip_processors.py +0 -142
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ufoLib/utils.py +0 -75
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/core.py +0 -682
  46. spaces/Dabs/Floyd-Steinberg-Dithering/README.md +0 -11
  47. spaces/DaleChen/AutoGPT/tests/unit/test_commands.py +0 -22
  48. spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/communal/__init__.py +0 -4
  49. spaces/DimaKoshman/MovieRecommender/README.md +0 -13
  50. spaces/DragGan/DragGan-Inversion/PTI/training/coaches/multi_id_coach.py +0 -73
spaces/101-5/gpt4free/g4f/.v1/testing/quora_test_2.py DELETED
@@ -1,12 +0,0 @@
- from gpt4free import quora
-
- token = quora.Account.create(logging=True, enable_bot_creation=True)
-
- model = quora.Model.create(
-     token=token, model='ChatGPT', system_prompt='you are ChatGPT a large language model ...'  # or claude-instant-v1.0
- )
-
- print(model.name)
-
- for response in quora.StreamingCompletion.create(custom_model=model.name, prompt='hello world', token=token):
-     print(response.text)

spaces/17TheWord/RealESRGAN/realesrgan/data/__init__.py DELETED
@@ -1,10 +0,0 @@
- import importlib
- from basicsr.utils import scandir
- from os import path as osp
-
- # automatically scan and import dataset modules for registry
- # scan all the files that end with '_dataset.py' under the data folder
- data_folder = osp.dirname(osp.abspath(__file__))
- dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
- # import all the dataset modules
- _dataset_modules = [importlib.import_module(f'realesrgan.data.{file_name}') for file_name in dataset_filenames]

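The deleted `__init__.py` above illustrates a registry auto-import pattern: every sibling file named `*_dataset.py` is imported at package load time so the dataset classes inside can register themselves as a side effect. A minimal standard-library sketch of the same idea, assuming a hypothetical `mypkg.data` package and using `os.listdir` in place of `basicsr.utils.scandir`:

```python
import importlib
from os import listdir
from os import path as osp

# Hypothetical package: each sibling '*_dataset.py' file defines dataset
# classes that add themselves to a registry when their module is imported.
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [
    osp.splitext(name)[0]
    for name in listdir(data_folder)
    if name.endswith('_dataset.py')
]
# Importing each module runs its registration side effects; the resulting
# list is kept mainly for introspection and debugging.
_dataset_modules = [
    importlib.import_module(f'mypkg.data.{name}') for name in dataset_filenames
]
```
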
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us MOD APK The Best Way to Play the Game with All Features Unlocked.md DELETED
@@ -1,112 +0,0 @@
- <br />
- <h1>Among Us Mod Menu APK Download: Everything You Need to Know</h1>
- <p>Among Us is a multiplayer social deduction game that has taken the world by storm. The game involves a group of players who play as crewmates or impostors on a spaceship. The crewmates have to complete tasks while trying to identify and eliminate the impostors, who can sabotage and kill them. The game is fun, addictive, and full of suspense and deception.</p>
- <p>But what if you want to spice up your gameplay with some extra features and options? That's where a mod menu apk comes in handy. A mod menu apk is a modified version of the original game that allows you to access a hidden menu with various cheats and hacks. You can use these to customize your character, unlock new skins and pets, become an impostor every time, see through walls, and much more.</p>
- <h2>among us mod menu apk download</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt;&gt;&gt; <a href="https://urlin.us/2uSVtM">https://urlin.us/2uSVtM</a></b></p><br /><br />
- <p>But how do you download and install a mod menu apk on your Android device? And what are the risks and alternatives of using one? In this article, we will answer these questions and more. Read on to find out everything you need to know about among us mod menu apk download.</p>
- <h2>What is a mod menu apk and what are its features?</h2>
- <p>A mod menu apk is a file format that contains a modified version of an Android app. In this case, it's a modified version of Among Us that includes a hidden menu with various cheats and hacks. You can access this menu by tapping on a floating icon on your screen while playing the game.</p>
- <p>Some of the features that a mod menu apk can offer are:</p>
- <ul>
- <li>Unlock all skins, hats, pets, and costumes.</li>
- <li>Always be an impostor or choose your role.</li>
- <li>See the name and role of other players.</li>
- <li>See through walls and vents.</li>
- <li>No kill cooldown or sabotage cooldown.</li>
- <li>Increase your speed, vision, and kill distance.</li>
- <li>Fake tasks and votes.</li>
- <li>End the game instantly or change the game settings.</li>
- <li>And much more!</li>
- </ul>
- <p>These features can make the game more fun and exciting, but they can also ruin the balance and fairness of the game. They can also get you banned from online servers if you use them in public matches. Therefore, it's advisable to use them only in private matches with your friends who agree to use them as well.</p>
- <h2>How to download and install a mod menu apk on Android?</h2>
- <p>To download and install a mod menu apk on your Android device, you need to follow these steps:</p>
- <ol>
- <li>Find a reliable website that offers a mod menu apk for Among Us. Some examples are APKPure, APKMirror, 5Play, etc. Make sure to check the reviews and ratings of the website and the file before downloading it.</li>
- <li>Download the mod menu apk file to your device. You may need to enable unknown sources in your device settings to allow installing apps from outside the Google Play Store.</li>
- <li>Locate the downloaded file in your file manager app and tap on it to install it. You may need to accept some permissions or pop-ups before installing it.</li>
- <li>Launch the installed app and enjoy playing Among Us with the mod menu enabled.</li>
- </ol>
- <h2>What are the risks of downloading modded apk files?</h2>
- <p>Downloading modded apk files can be risky for several reasons. Some of them are:</p>
- <ul>
- <li>You may download a fake or malicious file that contains malware or viruses that can harm your device or steal your personal information.</li>
- <li>You may violate the terms and conditions of the original app or game developer, which can result in legal issues or account suspension.</li>
- <li>You may ruin the gameplay experience for yourself and others by using cheats and hacks that give you an unfair advantage or break the game rules.</li>
- <li>You may encounter compatibility or performance issues with the modded apk file, such as crashes, glitches, errors, etc.</li>
- </ul>
- <p>Therefore, it's important to be careful and cautious when downloading modded apk files. You should always scan the files with a reputable antivirus app before installing them. You should also backup your device data and uninstall the original app or game before installing the modded one. You should also use the modded apk file at your own risk and responsibility.</p>
- <h2>What are some alternatives to among us mod menu apk?</h2>
- <p>If you don't want to download a mod menu apk for Among Us, but still want to enjoy some extra features and options, you can try some alternatives. Some of them are:</p>
- <p>among us mod menu apk download 2023<br />
- among us mod menu apk download android<br />
- among us mod menu apk download ios<br />
- among us mod menu apk download pc<br />
- among us mod menu apk download latest version<br />
- among us mod menu apk download free<br />
- among us mod menu apk download no root<br />
- among us mod menu apk download mediafıre<br />
- among us mod menu apk download always impostor<br />
- among us mod menu apk download unlimited money<br />
- among us mod menu apk download unlocked everything<br />
- among us mod menu apk download no ads<br />
- among us mod menu apk download god mode<br />
- among us mod menu apk download see impostor<br />
- among us mod menu apk download anti ban<br />
- among us mod menu apk download all skins<br />
- among us mod menu apk download all pets<br />
- among us mod menu apk download all hats<br />
- among us mod menu apk download no kill cooldown<br />
- among us mod menu apk download no verification<br />
- among us mod menu apk download online<br />
- among us mod menu apk download offline<br />
- among us mod menu apk download 2022.3.28<br />
- among us mod menu apk download 2022.3.29<br />
- among us mod menu apk download 2022.4.2<br />
- among us mod menu apk download 2022.5.10<br />
- among us mod menu apk download 2022.6.15<br />
- among us mod menu apk download 2022.7.25<br />
- among us mod menu apk download 2022.8.31<br />
- among us mod menu apk download 2022.9.9<br />
- among us mod menu apk download 2022.10.22<br />
- among us mod menu apk download 2022.11.5<br />
- among us mod menu apk download 2022.12.12<br />
- among us mod menu apk download 2023.1.1<br />
- among us mod menu apk download 2023.2.14<br />
- among us mod menu apk download for tablet<br />
- among us mod menu apk download for laptop<br />
- among us mod menu apk download for chromebook<br />
- among us mod menu apk download for windows 10<br />
- among us mod menu apk download for macbook<br />
- among us mod menu apk download for iphone<br />
- among us mod menu apk download for ipad<br />
- among us mod menu apk download for samsung galaxy s21 ultra</p>
- <ul>
- <li>Use a VPN app to change your location and join different servers with different game settings and rules.</li>
- <li>Use a voice chat app like Discord or TeamSpeak to communicate with your friends or other players while playing the game.</li>
- <li>Use a screen recorder app like AZ Screen Recorder or Mobizen Screen Recorder to record your gameplay and share it with others.</li>
- <li>Use a custom map maker app like Among Us Map Maker or Among Us Map Editor to create your own maps and scenarios for the game.</li>
- <li>Use a mod installer app like Among Us Mod Installer or Among Us Mod Menu Installer to install mods from other sources without downloading apk files.</li>
- </ul>
- <p>These alternatives can enhance your gameplay experience without compromising your device security or game integrity. However, you should still follow the game rules and respect other players while using them.</p>
- <h2>Conclusion: Summarize the main points and provide some tips for playing Among Us safely</h2>
- <p>In conclusion, among us mod menu apk download is a way to access a hidden menu with various cheats and hacks for the game Among Us. It can offer you some fun and exciting features, but it can also be risky and unethical. You should be careful and cautious when downloading modded apk files, and use them only in private matches with your friends who agree to use them as well. You should also try some alternatives that can enhance your gameplay experience without compromising your device security or game integrity. Here are some tips for playing Among Us safely:</p>
- <ul>
- <li>Always download apk files from reliable and reputable websites.</li>
- <li>Always scan apk files with a reputable antivirus app before installing them.</li>
- <li>Always backup your device data and uninstall the original app or game before installing the modded one.</li>
- <li>Always use the modded apk file at your own risk and responsibility.</li>
- <li>Always follow the game rules and respect other players while playing the game.</li>
- </ul>
- <p>We hope this article has helped you understand everything you need to know about among us mod menu apk download. Have fun playing Among Us!</p>
- <h2>FAQs: Answer some common questions about among us mod menu apk</h2>
- <table>
- <tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
- <tr><td>What is Among Us?</td><td>Among Us is a multiplayer social deduction game that involves a group of players who play as crewmates or impostors on a spaceship. The crewmates have to complete tasks while trying to identify and eliminate the impostors, who can sabotage and kill them.</td></tr>
- <tr><td>What is a mod menu apk?</td><td>A mod menu apk is a modified version of an Android app that allows you to access a hidden menu with various cheats and hacks. You can use these to customize your character, unlock new skins and pets, become an impostor every time, see through walls, and much more.</td></tr>
- <tr><td>How do I download and install a mod menu apk on Android?</td><td>You need to find a reliable website that offers a mod menu apk for Among Us, download the file to your device, enable unknown sources in your device settings, locate the file in your file manager app, tap on it to install it, and launch the installed app.</td></tr>
- <tr><td>What are the risks of downloading modded apk files?</td><td>You may download a fake or malicious file that contains malware or viruses that can harm your device or steal your personal information. You may violate the terms and conditions of the original app or game developer, which can result in legal issues or account suspension. You may ruin the gameplay experience for yourself and others by using cheats and hacks that give you an unfair advantage or break the game rules. You may encounter compatibility or performance issues with the modded apk file, such as crashes, glitches, errors, etc.</td></tr>
- <tr><td>What are some alternatives to among us mod menu apk?</td><td>You can use a VPN app to change your location and join different servers with different game settings and rules. You can use a voice chat app to communicate with your friends or other players while playing the game. You can use a screen recorder app to record your gameplay and share it with others. You can use a custom map maker app to create your own maps and scenarios for the game. You can use a mod installer app to install mods from other sources without downloading apk files.</td></tr>
- </table></p> 197e85843d<br />
- <br />
- <br />

spaces/1phancelerku/anime-remove-background/Blow Up - Rich Mavoko Ft. Fid Q MP3 Download and Rating.md DELETED
@@ -1,94 +0,0 @@
-
- <h1>Fid Q ft Rich Mavoko Mp3 Download: How to Get and Enjoy Their Songs</h1>
- <p>If you are a fan of bongo flava music, you might have heard of Fid Q and Rich Mavoko, two of the most talented and prolific artists in the Tanzanian music scene. They have collaborated on several hit songs, such as Sheri, Tawile, and Blow Up, that showcase their unique styles and skills. In this article, we will show you how to download Fid Q ft Rich Mavoko mp3 songs for free and legally, and how to enjoy them on your devices.</p>
- <h2>Who are Fid Q and Rich Mavoko?</h2>
- <p>Fid Q, born Fareed Kubanda on August 13, 1982 in Mwanza, Tanzania, is a popular bongo flava artist who is known for his social awareness and political insightfulness in his lyrics. He started his music career as an independent artist in the late 1990s, before joining King Kaka's record label in Kenya in 2016. He later moved to WCB Wasafi record label in Tanzania, owned by Diamond Platnumz, but left in 2018 due to contractual disputes. He is currently an independent artist under his own label, Billionaire Kid. Some of his most famous songs include Siri ya Mchezo, Bendera ya Chuma, Korasi, Bambam, and Mafanikio. </p>
- <h2>fid q ft rich mavoko mp3 download</h2><br /><p><b><b>Download File</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://jinyurl.com/2uNOb7">https://jinyurl.com/2uNOb7</a></b></p><br /><br />
- <p>Rich Mavoko, born Richard Martin Lusinga on October 26, 1990 in Morogoro, Tanzania, is a singer and songwriter who specializes in bongo flava, afropop, and afrobeat music. He also started his music career as an independent artist in the early 2000s, before joining King Kaka's record label in Kenya in 2016. He then joined WCB Wasafi record label in Tanzania later that year, but also left in 2018 due to contractual issues. He is now an independent artist under his own label, Billionaire Kid. Some of his most popular songs include Roho Yangu, Kokoro featuring Diamond Platnumz, Rudi featuring Patoranking, Show Me featuring Harmonize, and Bad Boy featuring A.Y. </p>
- <p>Fid Q and Rich Mavoko have collaborated on several songs that have become hits in Tanzania and beyond. They have a great chemistry and complement each other's vocals and rap skills. Some of their collaborations include Sheri, Tawile, and Blow Up. These songs are available for download on various free music download sites that we will discuss below.</p>
- <h2>How to download Fid Q ft Rich Mavoko mp3 songs for free and legally</h2>
- <p>There are many websites that offer free music downloads, but not all of them are legal or safe. Some of them may contain viruses or malware that can harm your devices or steal your personal information. Some of them may also violate the copyright laws and infringe on the rights of the artists. Therefore, you need to be careful when choosing a website to download Fid Q ft Rich Mavoko mp3 songs.</p>
- <p>Here are some of the best free music download sites that are legal and safe:</p>
- <ul>
- <li><strong>SoundCloud</strong>: SoundCloud is a platform where artists can upload their music for online streaming or downloading. You can find many Fid Q ft Rich Mavoko mp3 songs on SoundCloud, such as Sheri, Tawile, and Blow Up. You can stream them online or download them for offline listening. To download them, you need to create a free account on SoundCloud and then click on the download button below the song. You can also use the SoundCloud app on your mobile devices to access their songs.</li>
- <li><strong>Audiomack</strong>: Audiomack is another platform where artists can share their music for free. You can also find many Fid Q ft Rich Mavoko mp3 songs on Audiomack, such as Sheri, Tawile, and Blow Up. You can stream them online or download them for offline listening. To download them, you need to create a free account on Audiomack and then click on the download button below the song. You can also use the Audiomack app on your mobile devices to access their songs.</li>
- <li><strong>YouTube</strong>: YouTube is the most popular video-sharing platform in the world, but it also has a lot of music content. You can find many Fid Q ft Rich Mavoko mp3 songs on YouTube, such as Sheri, Tawile, and Blow Up. You can stream them online or download them for offline listening. To download them, you need to use a third-party tool or website that can convert YouTube videos to mp3 files. There are many such tools and websites available online, but you need to be careful about their legality and safety. Some of them may contain viruses or malware that can harm your devices or steal your personal information. Some of them may also violate the YouTube terms of service and infringe on the rights of the artists. Therefore, you need to do your research before using any of them.</li>
- </ul>
- <p>Downloading Fid Q ft Rich Mavoko mp3 songs for free and legally has many benefits, such as:</p>
- <ul>
- <li>You can support the artists by listening to their music and giving them feedback.</li>
- <li>You can save money by not having to pay for subscription fees or data charges.</li>
- <li>You can enjoy their music anytime and anywhere without worrying about internet connection or buffering issues.</li>
- <li>You can create your own playlists and mixtapes with their songs and other songs that you like.</li>
- <li>You can share their music with your friends and family and spread the love of bongo flava music.</li>
- </ul>
- <h2>How to enjoy Fid Q ft Rich Mavoko mp3 songs</h2>
- <p>Once you have downloaded Fid Q ft Rich Mavoko mp3 songs, you can enjoy them on your devices in various ways. Here are some tips on how to enjoy their songs:</p>
- <ul>
- <li><strong>Use the best devices and apps to play their songs</strong>: You can play their songs on any device that supports mp3 format, such as your computer, laptop, smartphone, tablet, or mp3 player. However, some devices and apps may have better sound quality and features than others. For example, you may want to use headphones or speakers that have good bass and clarity to enhance their songs. You may also want to use apps that have equalizers, lyrics, shuffle, repeat, and other functions that can improve your listening experience.</li>
- <li><strong>Use the best playlists and genres to mix their songs with</strong>: You can create your own playlists with Fid Q ft Rich Mavoko mp3 songs and other songs that you like. You can also use existing playlists that have their songs and other similar songs. For example, you can use playlists that have bongo flava, afropop, afrobeat, hip hop, r&b, dancehall, reggae, or other genres that match their style and vibe. You can also use playlists that have different moods, themes, or occasions, such as party, chill, workout, love, motivation, or celebration.</li>
- <li><strong>Use the best occasions and moods to listen to their songs</strong>: You can listen to Fid Q ft Rich Mavoko mp3 songs anytime and anywhere you want, but some occasions and moods may be more suitable than others. For example, you can listen to their songs when you want to relax, have fun, dance, sing along, or feel inspired. You can also listen to their songs when you want to celebrate something, express yourself, connect with others, or learn something new.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Fid Q ft Rich Mavoko mp3 download is a great way to enjoy some of the best bongo flava music from Tanzania. They are two talented artists who have collaborated on several hit songs that showcase their skills and styles. You can download their songs for free and legally from various websites that offer their music, such as SoundCloud, Audiomack, and YouTube. You can also enjoy their songs on your devices by using the best devices and apps to play their songs, the best playlists and genres to mix their songs with, and the best occasions and moods to listen to their songs. Fid Q ft Rich Mavoko mp3 download is a great way to support the artists and appreciate their music.</p>
- <p>fid q ft rich mavoko sheri mp3 download<br />
- fid q ft rich mavoko tawile mp3 download<br />
- fid q ft rich mavoko blow up mp3 download<br />
- fid q ft rich mavoko sheria mp3 download<br />
- fid q ft rich mavoko sina imani mp3 download<br />
- fid q ft rich mavoko wanakutamani mp3 download<br />
- fid q ft rich mavoko pacha wangu mp3 download<br />
- fid q ft rich mavoko we dada mp3 download<br />
- fid q ft rich mavoko rudi mp3 download<br />
- fid q ft rich mavoko moyo wangu mp3 download<br />
- fid q ft rich mavoko new song 2022 mp3 download<br />
- fid q ft rich mavoko new song 2021 mp3 download<br />
- fid q ft rich mavoko new song 2020 mp3 download<br />
- fid q ft rich mavoko latest song mp3 download<br />
- fid q ft rich mavoko best songs mp3 download<br />
- fid q ft rich mavoko all songs mp3 download<br />
- fid q ft rich mavoko audio songs mp3 download<br />
- fid q ft rich mavoko video songs mp3 download<br />
- fid q ft rich mavoko bongo flava songs mp3 download<br />
- fid q ft rich mavoko tanzania songs mp3 download<br />
- fid q ft rich mavoko sheria lyrics mp3 download<br />
- fid q ft rich mavoko tawile lyrics mp3 download<br />
- fid q ft rich mavoko blow up lyrics mp3 download<br />
- fid q ft rich mavoko sheria video mp3 download<br />
- fid q ft rich mavoko tawile video mp3 download<br />
- fid q ft rich mavoko blow up video mp3 download<br />
- fid q ft rich mavoko sheria remix mp3 download<br />
- fid q ft rich mavoko tawile remix mp3 download<br />
- fid q ft rich mavoko blow up remix mp3 download<br />
- fid q ft rich mavoko sheria instrumental mp3 download<br />
- fid q ft rich mavoko tawile instrumental mp3 download<br />
- fid q ft rich mavoko blow up instrumental mp3 download<br />
- fid q ft rich mavoko sheria karaoke mp3 download<br />
- fid q ft rich mavoko tawile karaoke mp3 download<br />
- fid q ft rich mavoko blow up karaoke mp3 download<br />
- citimuzik.com/fid-q-ft-rich-mavoko-sheri-mp3-download (^1^)<br />
- citimuzik.com/fid-q-ft-rich-mavoko-tawile-mp3-download (^2^)<br />
- citimuzik.com/fid-q-ft-rich-mavoko-blow-up-mp3-download (^3^)<br />
- citimuzik.com/rich-mavoko-ft-fid-q-sheri-prod-by-abba (^1^)<br />
- citimuzik.com/rich-mavoko-ft-patoranking-rudi-prod-by-s2kizzy (^1^)<br />
- citimuzik.com/rich-mavoko-wanakutamani-prod-by-s2kizzy (^1^)<br />
- citimuzik.com/shetta-sina-imani-ft-rich-mavoko (^1^)<br />
- citimuzik.com/tid-mnyama-ft-rich-mavoko-we-dada (^1^)<br />
- citimuzik.com/rich-mavoko-pacha-wangu-prod-by-abba (^1^)<br />
- newscientist.com/korean-nuclear-fusion-reactor</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions and answers about Fid Q ft Rich Mavoko mp3 download:</p>
- <ul>
- <li><strong>Q: How can I contact Fid Q or Rich Mavoko?</strong></li>
- <li>A: You can contact them through their social media accounts, such as Instagram, Twitter, Facebook, or YouTube. You can also contact them through their email addresses or phone numbers, which you can find on their official websites or profiles.</li>
- <li><strong>Q: How can I support Fid Q or Rich Mavoko financially?</strong></li>
- <li>A: You can support them financially by buying their music from online platforms, such as iTunes, Spotify, Amazon Music, or Google Play Music. You can also support them by attending their concerts or events, buying their merchandise, or donating to their causes.</li>
- <li><strong>Q: How can I get more information about Fid Q or Rich Mavoko?</strong></li>
- <li>A: You can get more information about them by visiting their official websites or profiles, where you can find their biography, discography, news, updates, and more. You can also get more information about them by reading articles, blogs, reviews, interviews, or books about them.</li>
- <li><strong>Q: How can I give feedback to Fid Q or Rich Mavoko?</strong></li>
- <li>A: You can give feedback to them by commenting on their posts, videos, or songs, or by sending them messages or emails. You can also give feedback to them by rating or reviewing their music on online platforms, such as iTunes, Spotify, Amazon Music, or Google Play Music.</li>
- <li><strong>Q: How can I collaborate with Fid Q or Rich Mavoko?</strong></li>
- <li>A: You can collaborate with them by contacting them through their social media accounts, email addresses, or phone numbers. You can also collaborate with them by joining their record label, Billionaire Kid, or by participating in their contests or challenges.</li>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />

spaces/1phancelerku/anime-remove-background/Download Stumble Guys Lite APK and Join the Ultimate Party Knockout Game.md DELETED
@@ -1,107 +0,0 @@
- <br />
- <h1>Stumble Guys Lite APK: A Fun and Free Battle Royale Game for Mobile</h1>
- <p>Do you love playing chaotic and hilarious multiplayer games with your friends? Do you want to experience the thrill of competing against 32 players online in various obstacle courses? Do you want to customize your character with different outfits and emotes? If you answered yes to any of these questions, then you might want to check out <strong>Stumble Guys</strong>, a fun and free battle royale game for mobile devices.</strong></p>
- <h2>stumble guys lite apk</h2><br /><p><b><b>Download File</b> ->->->-> <a href="https://jinyurl.com/2uNLKy">https://jinyurl.com/2uNLKy</a></b></p><br /><br />
- <p>Stumble Guys is a game inspired by the popular Fall Guys, but exclusively for Android and iOS. It has been downloaded over 100 million times on Google Play alone, and has received positive reviews from players and critics alike. In this game, you have to race through different levels with up to 32 players online, avoiding obstacles, pushing others, and trying to be the last one standing. The game is colorful, whacky, and full of hilarious fails.</p>
- <p>However, if you want to enjoy Stumble Guys without any ads or in-app purchases, or if you want to play it on a bigger screen with better controls, then you might want to try <strong>Stumble Guys Lite APK</strong>, a modified version of the game that offers some advantages over the original one. In this article, we will tell you what Stumble Guys Lite APK is, how to download it, what features it has, how to play it on PC with BlueStacks, and some tips and tricks for winning the game.</p>
- <h2>What is Stumble Guys Lite APK and how to download it</h2>
- <p>Stumble Guys Lite APK is a modified version of Stumble Guys that removes all the ads and in-app purchases from the game. This means that you can play the game without any interruptions or limitations. You can also access all the outfits and emotes without spending any money. Moreover, Stumble Guys Lite APK has a smaller file size than the original game, which means that it takes less space on your device and runs faster.</p>
- <p>To download Stumble Guys Lite APK, you need to follow these steps:</p>
- <ol>
- <li>Go to [this website](^3^) and search for "stumble guys".</li>
- <li>Select the latest version of Stumble Guys Lite APK from the results.</li>
- <li>Click on the download button and wait for the file to be downloaded.</li>
- <li>Go to your device settings and enable installation from unknown sources.</li>
- <li>Locate the downloaded file in your file manager and tap on it to install it.</li>
- <li>Launch the game and enjoy!</li>
- </ol>
- <h2>What are the features of Stumble Guys Lite APK</h2>
- <p>Stumble Guys Lite APK has all the features of the original game, plus some extra ones. Here are some of the features that you can enjoy with Stumble Guys Lite APK:</p>
- <p>stumble guys lite apk download<br />
- stumble guys lite apk mod<br />
- stumble guys lite apk free<br />
- stumble guys lite apk latest version<br />
- stumble guys lite apk android<br />
- stumble guys lite apk hack<br />
- stumble guys lite apk unlimited gems<br />
- stumble guys lite apk offline<br />
- stumble guys lite apk for pc<br />
- stumble guys lite apk no ads<br />
- stumble guys lite apk online<br />
- stumble guys lite apk update<br />
- stumble guys lite apk old version<br />
- stumble guys lite apk revdl<br />
- stumble guys lite apk rexdl<br />
- stumble guys lite apk pure<br />
- stumble guys lite apk mirror<br />
- stumble guys lite apk uptodown<br />
- stumble guys lite apk apkpure<br />
- stumble guys lite apk appvn<br />
- stumble guys lite apk mob.org<br />
- stumble guys lite apk happymod<br />
- stumble guys lite apk android 1<br />
- stumble guys lite apk android oyun club<br />
- stumble guys lite apk an1.com<br />
- stumble guys lite apk blackmod<br />
- stumble guys lite apk by rexdl.com<br />
- stumble guys lite apk cracked<br />
- stumble guys lite apk cheat<br />
- stumble guys lite apk data obb<br />
- stumble guys lite apk download for android<br />
- stumble guys lite apk download link<br />
- stumble guys lite apk download apkpure<br />
- stumble guys lite apk download uptodown<br />
- stumble guys lite apk download modded games.com<br />
- stumble guys lite apk file download<br />
- stumble guys lite apk full version<br />
- stumble guys lite apk game download<br />
- stumble guys lite apk google play store link[^1^]<br />
- stumble guys lite apk highly compressed<br />
- stumble guys lite apk hack download<br />
- stumble guys lite apk indir android oyun club<br />
- stumble guys lite apk latest update download link[^1^]<br />
- stumble guys lite apk mod menu download link[^1^]<br />
- stumble guys lite apk mod unlimited money and gems[^1^]<br />
- stumble guys lite apk new update download link[^1^]<br />
- stumble guys lite apk obb file download link[^1^]<br />
- stumble guys lite apk pro version free download link[^1^]<br />
- stumble guys lite apk unlimited money and gems download link[^1^]</p>
- <ul>
- <li>No ads or in-app purchases: You can play the game without any interruptions or limitations.</li>
- <li>All outfits and emotes unlocked: You can customize your character with different outfits and emotes without spending any money.</li>
- <li>Smaller file size: The game takes less space on your device and runs faster.</li>
- <li>New maps and modes: You can play on new maps and modes that are not available in the original game.</li>
- <li>Tournaments: You can participate in tournaments and win prizes.</li>
- </ul>
- <h2>How to play Stumble Guys Lite APK on PC with BlueStacks</h2>
- <p>If you want to play Stumble Guys Lite APK on PC with a bigger screen and better controls, then you can use BlueStacks, an Android emulator that lets you run mobile games on your PC. Here is how to do it:</p>
- <ol>
- <li>Download BlueStacks from [BlueStacks](^1^) from [their website](^2^) and install it on your PC. <li>Launch BlueStacks and sign in to your Google account.</li>
- <li>Go to the BlueStacks home screen and click on the "Install APK" button at the bottom right.</li>
- <li>Select the Stumble Guys Lite APK file that you downloaded earlier and wait for it to install.</li>
- <li>Click on the Stumble Guys icon on the BlueStacks home screen to launch the game.</li>
- </ol>
- <p>Now you can play Stumble Guys Lite APK on PC with a bigger screen and better controls. You can also use the BlueStacks features such as keyboard mapping, macro recorder, multi-instance, and more to enhance your gameplay.</p>
- <h2>What are some tips and tricks for winning Stumble Guys Lite APK</h2>
- <p>Stumble Guys Lite APK is a game that requires skill, luck, and strategy to win. Here are some tips and tricks that can help you improve your chances of becoming the last one standing:</p>
- <ul>
- <li>Use the double jump: One of the most important tricks for Stumble Guys is the double jump. You can perform a double jump by tapping the jump button twice, once at the peak of your first jump. This can help you clear obstacles, reach higher places, and avoid falling.</li>
- <li>Use the obstacles to your advantage: Some obstacles can actually help you boost your speed or progress through a level. For example, you can use the fans, springs, or cannons to launch yourself forward or upward. You can also use the spinning hammers or logs to knock other players off their path.</li>
- <li>Customize your controls: You can adjust the sensitivity and position of your controls in the settings menu. This can help you find the best configuration for your play style and device. You can also use the BlueStacks keymapping tool to create custom keyboard shortcuts for your actions.</li>
- <li>Learn the maps and modes: There are many different maps and modes in Stumble Guys Lite APK, each with its own challenges and strategies. You should familiarize yourself with the layout, obstacles, and objectives of each map and mode, so that you can plan your moves accordingly. You can also watch other players or videos online to learn from their mistakes or tips.</li>
- <li>Prepare your character skills: Before each match, you can choose one of four character skills to use in the game. These skills are Dash, Shield, Punch, and Freeze. Each skill has its own cooldown and effect, so you should choose wisely depending on the map and mode. For example, Dash can help you escape from sticky situations or reach the finish line faster, while Punch can help you knock back other players or obstacles.</li>
- </ul>
- <h2>Conclusion: Summary of the main points and a call to action</h2>
- <p>Stumble Guys Lite APK is a modified version of Stumble Guys that offers some advantages over the original game, such as no ads or in-app purchases, all outfits and emotes unlocked, smaller file size, new maps and modes, and tournaments. You can download Stumble Guys Lite APK from [this website](^3^) and install it on your device or PC with BlueStacks. You can also use some tips and tricks to improve your gameplay and win more matches.</p>
- <p>If you are looking for a fun and free battle royale game for mobile devices, then you should definitely give Stumble Guys Lite APK a try. It is a game that will make you laugh, rage, and enjoy with your friends or strangers online. Download it now and join the endless running fun!</p>
- <h2>FAQs: Five common questions and answers about Stumble Guys Lite APK</h2>
- <table>
- <tr><th>Question</th><th>Answer</th></tr>
- <tr><td>Is Stumble Guys Lite APK safe to download?</td><td>Yes, Stumble Guys Lite APK is safe to download from [this website](^3^), which is a trusted source for modded games. However, you should always be careful when downloading files from unknown sources and scan them with an antivirus before installing them.</td></tr>
- <tr><td>Is Stumble Guys Lite APK compatible with my device?</td><td>Stumble Guys Lite APK is compatible with most Android devices that have Android 5.0 or higher. However, some devices may not support some features or run smoothly due to hardware limitations. You can also play Stumble Guys Lite APK on PC with BlueStacks if you want a better performance.</td></tr>
- <tr><td>Can I play Stumble Guys Lite APK with my friends?</td><td>Yes, you can play Stumble Guys Lite APK with your friends online by inviting them to join your lobby or joining theirs. You can also chat with them using voice or text messages during the game during the game. You can also play with random players from around the world.</td></tr>
- <tr><td>How can I update Stumble Guys Lite APK?</td><td>Stumble Guys Lite APK is updated regularly to match the original game and add new features. You can check for updates on [this website] or enable the auto-update option in the settings menu. You can also follow the official social media accounts of Stumble Guys to stay updated on the latest news and events.</td></tr>
- <tr><td>How can I contact the developers of Stumble Guys Lite APK?</td><td>If you have any questions, feedback, or suggestions for Stumble Guys Lite APK, you can contact the developers by sending an email to [this address] or by joining their [Discord server]. You can also report any bugs or issues that you encounter while playing the game.</td></tr>
- </table></p> 401be4b1e0<br />
- <br />
- <br />

spaces/1phancelerku/anime-remove-background/Download and play Honkai Star Rail on your Chromebook for free.md DELETED
@@ -1,90 +0,0 @@
-
- <h1>How to Download Honkai: Star Rail on Chromebook</h1>
- <p>If you are a fan of role-playing games and anime-style graphics, you might be interested in playing Honkai: Star Rail, a spin-off of the popular Honkai Impact 3 game. But what if you don't have an Android device or a PC? Can you play it on your Chromebook?</p>
- <p>The answer is yes, you can! In this article, we will show you how to download and play Honkai: Star Rail on your Chromebook using the Google Play Store app. We will also give you some tips and tricks on how to make the most of your gaming experience. Let's get started!</p>
- <h2>how to download honkai star rail on chromebook</h2><br /><p><b><b>DOWNLOAD</b> >> <a href="https://jinyurl.com/2uNTdX">https://jinyurl.com/2uNTdX</a></b></p><br /><br />
- <h2>What is Honkai: Star Rail?</h2>
- <p>Honkai: Star Rail is a role-playing gacha game developed by miHoYo, the same company behind Honkai Impact 3 and Genshin Impact. The game is set in an alternate universe where you can explore different worlds, meet new characters, and fight against the Honkai, a mysterious force that threatens the existence of humanity.</p>
- <p>The game features stunning graphics, immersive sound effects, and engaging gameplay. You can customize your own Stellaron, a flying vehicle that allows you to travel across the galaxy. You can also collect and upgrade various characters, each with their own skills and personalities. You can play solo or team up with other players online.</p>
- <p>Honkai: Star Rail is compatible with Chromebooks that support Android apps. You can check if your Chromebook model is eligible for Android apps <a href="(^8^)">here</a>. If it is, you can follow the next steps to install Android apps on your Chromebook.</p>
- <h2>How to Install Android Apps on Chromebook</h2>
- <p>Before you can download Honkai: Star Rail, you need to enable the Google Play Store app on your Chromebook. This will allow you to access millions of Android apps from the Play Store. Here's how to do it:</p>
- <ol>
- <li>Sign in to your Chromebook with your Google account.</li>
- <li>Click on your account photo in the bottom-right corner of the screen.</li>
- <li>Select Settings.</li>
- <li>In the left panel, click Apps.</li>
- <li>Under Google Play Store, turn on Install apps and games from Google Play on your Chromebook.</li>
- <li>You will see a window that says Welcome to Google Play Store. Click More.</li>
- <li>Read and agree to the terms of service.</li>
- <li>The Google Play Store app will open. You can now search for and install Android apps on your Chromebook.</li>
- </ol>
- <h2>How to Download and Play Honkai: Star Rail on Chromebook</h2>
- <p>Now that you have enabled the Google Play Store app, you can download Honkai: Star Rail on your Chromebook. Here's how:</p>
- <ol>
- <li>Open the Google Play Store app from the Launcher or the shelf.</li>
- <li>In the search box, type Honkai: Star Rail and press Enter.</li>
- <li>Find the game from the list of results and click Install.</li>
- <li>The game will download and install automatically. You can see the progress in the notification area.</li>
- <li>Once the installation is complete, click Open or find the game icon in the Launcher or the shelf.</li>
- <li>The game will launch and ask for some permissions. Grant them to access the game features.</li>
- <li>Enjoy playing Honkai: Star Rail on your Chromebook!</li>
- </ol>
- <h2>Tips and Tricks for Playing Honkai: Star Rail on Chromebook</h2>
- <p>Playing Honkai: Star Rail on your Chromebook can be a lot of fun, but it can also be challenging if you are not familiar with the game controls or the settings. Here are some tips and tricks to help you improve your gaming experience:</p>
- <ul>
- <li>To adjust the game settings, such as the graphics, the sound, the language, and the controls, click on the gear icon in the top-right corner of the game screen.</li>
- <li>To use keyboard and mouse controls, you need to enable them in the game settings. You can also customize the key mapping and the mouse sensitivity to suit your preferences.</li>
- <li>To back up your game data, you need to link your game account to a miHoYo account or a third-party account, such as Facebook or Google. This will allow you to sync your progress across different devices and platforms.</li>
- <li>To get more resources, such as crystals, coins, and stamina, you can complete various missions, events, and achievements in the game. You can also join a guild and cooperate with other players.</li>
- <li>To learn more about the game mechanics, the characters, and the story, you can visit the official website <a href="">here</a> or the fan wiki <a href="">here</a>. You can also watch some gameplay videos and guides on YouTube or Twitch.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Honkai: Star Rail is an amazing game that you can play on your Chromebook with the help of the Google Play Store app. You can explore different worlds, collect and upgrade characters, and fight against the Honkai with stunning graphics and immersive sound effects. You can also follow our tips and tricks to optimize your gaming experience and have more fun.</p>
- <p>What are you waiting for? Download Honkai: Star Rail on your Chromebook today and join the adventure!</p>
- <p>How to install honkai star rail on chrome os<br />
- Honkai star rail chromebook guide: tips and tricks<br />
- Honkai star rail epic games store download for chromebook<br />
- How to play honkai star rail on chromebook with controller support<br />
- Honkai star rail review: a space fantasy RPG for chromebook users<br />
- How to fix honkai star rail not working on chromebook<br />
- Honkai star rail system requirements and compatibility for chromebook<br />
- How to update honkai star rail on chromebook<br />
- How to get free warps and rewards in honkai star rail on chromebook<br />
- Honkai star rail best characters and builds for chromebook players<br />
- How to transfer honkai star rail account from mobile to chromebook<br />
- Honkai star rail gameplay and features: what to expect on chromebook<br />
- How to join honkai star rail multiplayer and co-op mode on chromebook<br />
- Honkai star rail cheats and hacks: how to get unlimited resources on chromebook<br />
- Honkai star rail vs genshin impact: which one is better for chromebook gamers<br />
- How to stream honkai star rail on twitch from chromebook<br />
- Honkai star rail beginners guide: how to start your space adventure on chromebook<br />
- Honkai star rail best settings and optimization for chromebook performance<br />
- How to download honkai star rail apk for chromebook<br />
- Honkai star rail troubleshooting: how to fix common errors and issues on chromebook<br />
- How to access honkai star rail beta test on chromebook<br />
- Honkai star rail release date and news: when will it be available on chromebook<br />
- Honkai star rail story and lore: what you need to know before playing on chromebook<br />
- Honkai star rail fan art and wallpapers: how to customize your chromebook with hoyoverse themes<br />
- Honkai star rail community and forums: where to find help and support for chromebook players<br />
- How to backup and restore honkai star rail data on chromebook<br />
- Honkai star rail mod apk: how to install and use mods on chromebook<br />
- Honkai star rail tips and tricks: how to master the turn-based combat system on chromebook<br />
- Honkai star rail wiki: where to find all the information and guides for chromebook players<br />
- Honkai star rail codes and coupons: how to redeem free gifts and rewards on chromebook<br />
- How to uninstall honkai star rail from chromebook<br />
- Honkai star rail soundtrack and music: how to enjoy the epic tunes on chromebook<br />
- Honkai star rail crossplay and cross-save: how to play with friends across different platforms on chromebook<br />
- Honkai star rail events and updates: what's new and coming soon on chromebook<br />
- Honkai star rail memes and jokes: how to have fun and laugh with the hoyoverse community on chromebook</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Honkai: Star Rail and Chromebooks:</p>
- <ol>
- <li><b>Is Honkai: Star Rail free to play?</b><br>Yes, Honkai: Star Rail is free to download and play. However, it does have some in-app purchases that can enhance your gameplay or unlock more content.</li>
- <li><b>Can I play Honkai: Star Rail offline?</b><br>No, Honkai: Star Rail requires an internet connection to play. You need to connect to a Wi-Fi network or use mobile data to access the game features.</li>
- <li><b>How much storage space does Honkai: Star Rail take up?</b><br>Honkai: Star Rail takes up about 3 GB of storage space on your device. You may need to free up some space on your Chromebook before downloading the game.</li>
- <li><b>Can I play Honkai: Star Rail with a controller?</b><br>Yes, you can play Honkai: Star Rail with a controller if you have one that is compatible with your Chromebook. You can connect it via Bluetooth or USB and configure it in the game settings.</li>
- <li><b>Can I transfer my Honkai: Star Rail account from Android or PC to Chromebook?</b><br>Yes, you can transfer your Honkai: Star Rail account from Android or PC to Chromebook if you have linked it to a miHoYo account or a third-party account. You just need to log in with the same account on your Chromebook and your progress will be synced.</li>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />

spaces/AB-TW/team-ai/agents/tools/shell_tool.py DELETED
@@ -1,54 +0,0 @@
- from langchain.tools import ShellTool
- from langchain.agents import tool
- from langchain.chains import LLMChain
- from langchain.prompts import PromptTemplate
-
- from models import llm
-
-
- generate_python_code = """
- Please write Shell script to fulfill the following requirement:
-
- ---
- {input}
- ---
-
- Only output the code section with code block.
- """
-
- generate_python_code_promopt = PromptTemplate(input_variables=["input"], template=generate_python_code,)
-
- generate_code_chain = LLMChain(llm = llm(temperature=0.1), prompt=generate_python_code_promopt, output_key="code")
-
- shell_tool = ShellTool()
-
- @tool("Generate and Excute Shell Code ", return_direct=True)
- def generate_and_excute_shell_code(input: str) -> str:
-     '''useful for when you need to generate python code and excute it'''
-     command = generate_code_chain.run(input)
-     print(command)
-     code_content = command
-     if('```python' in command):
-         start = command.find('```python') + len('```python')
-         end = command.rfind('```')
-         code_content = command[start:end].strip()
-     elif("```" in command):
-         start = command.find('```') + len('```')
-         end = command.rfind('```')
-         code_content = command[start:end].strip()
-     result = shell_tool.run(code_content)
-     return f"""
- code:
- ```
- {code_content}
- ```
-
- execute result:
- ---
- {result}
- ---
- """
-
-
- shell_tool = ShellTool()
- shell_tool.description = shell_tool.description + f"args {shell_tool.args}".replace("{", "{{").replace("}", "}}")

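The deleted tool above strips markdown fences from LLM output with paired `find`/`rfind` calls before handing the body to `ShellTool`. A regex-based sketch of that extraction step (the function name and example reply are illustrative, not from the deleted file):

```python
import re

def extract_code_block(text: str) -> str:
    """Return the body of the first fenced code block in `text`,
    or the stripped text itself when no fence is present."""
    match = re.search(r"```[a-zA-Z]*\n(.*?)```", text, re.DOTALL)
    return match.group(1).strip() if match else text.strip()

# Example: an LLM reply that wraps a shell command in a fence.
reply = "Sure, here you go:\n```bash\necho hello\n```"
print(extract_code_block(reply))  # -> echo hello
```
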
spaces/AIWaves/SOP_Generation-single/LLM/base_LLM.py DELETED
@@ -1,137 +0,0 @@
- from abc import abstractclassmethod
- import openai
- import os
- import time
- from Memory import Memory
- from utils import save_logs
-
- class LLM:
-     def __init__(self) -> None:
-         pass
-
-     @abstractclassmethod
-     def get_response():
-         pass
-
-
- class OpenAILLM(LLM):
-     def __init__(self,**kwargs) -> None:
-         super().__init__()
-         self.MAX_CHAT_HISTORY = eval(
-             os.environ["MAX_CHAT_HISTORY"]) if "MAX_CHAT_HISTORY" in os.environ else 10
-
-         self.model = kwargs["model"] if "model" in kwargs else "gpt-3.5-turbo-16k-0613"
-         self.temperature = kwargs["temperature"] if "temperature" in kwargs else 0.3
-         self.log_path = kwargs["log_path"].replace("/",os.sep) if "log_path" in kwargs else "logs"
-
-
-     def get_stream(self,response, log_path, messages):
-         ans = ""
-         for res in response:
-             if res:
-                 r = (res.choices[0]["delta"].get("content")
-                      if res.choices[0]["delta"].get("content") else "")
-                 ans += r
-                 yield r
-
-         save_logs(log_path, messages, ans)
-
-
-
-     def get_response(self,
-                      chat_history,
-                      system_prompt,
-                      last_prompt=None,
-                      stream=False,
-                      functions=None,
-                      function_call="auto",
-                      WAIT_TIME=20,
-                      **kwargs):
-         """
-         return LLM's response
-         """
-         openai.api_key = os.environ["API_KEY"]
-         if "PROXY" in os.environ:
-             assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"],"PROXY error,PROXY must be http or socks"
-             openai.proxy = os.environ["PROXY"]
-         if "API_BASE" in os.environ:
-             openai.api_base = os.environ["API_BASE"]
-         active_mode = True if ("ACTIVE_MODE" in os.environ and os.environ["ACTIVE_MODE"] == "0") else False
-         model = self.model
-         temperature = self.temperature
-
-
-         if active_mode:
-             system_prompt = system_prompt + "Please keep your reply as concise as possible."
-
-         messages = [{
-             "role": "system",
-             "content": system_prompt
-         }] if system_prompt else []
-
-         if chat_history:
-             if len(chat_history) > self.MAX_CHAT_HISTORY:
-                 chat_history = chat_history[- self.MAX_CHAT_HISTORY:]
-             if isinstance(chat_history[0],dict):
-                 messages += chat_history
-             elif isinstance(chat_history[0],Memory):
-                 messages += [memory.get_gpt_message("user") for memory in chat_history]
-
-
-
-         if last_prompt:
-             if active_mode:
-                 last_prompt = last_prompt + "Please keep your reply as concise as possible."
-             # messages += [{"role": "system", "content": f"{last_prompt}"}]
-             messages[-1]["content"] += last_prompt
-
-
-         while True:
-             try:
-                 if functions:
-                     response = openai.ChatCompletion.create(
-                         model=model,
-                         messages=messages,
-                         functions=functions,
-                         function_call=function_call,
-                         temperature=temperature,
-                     )
-                 else:
-                     response = openai.ChatCompletion.create(
-                         model=model,
-                         messages=messages,
-                         temperature=temperature,
-                         stream=stream)
-                 break
-             except Exception as e:
-                 print(e)
-                 if "maximum context length is" in str(e):
-                     if len(messages)>1:
-                         del messages[1]
-                     else:
-                         assert False, "exceed max length"
-                 else:
-                     print(f"Please wait {WAIT_TIME} seconds and resend later ...")
-                     time.sleep(WAIT_TIME)
-
-         if functions:
-             save_logs(self.log_path, messages, response)
-             return response.choices[0].message
-         elif stream:
-             return self.get_stream(response, self.log_path, messages)
-         else:
-             save_logs(self.log_path, messages, response)
-             return response.choices[0].message["content"]
-
-
- def init_LLM(default_log_path,**kwargs):
-     LLM_type = kwargs["LLM_type"] if "LLM_type" in kwargs else "OpenAI"
-     log_path = kwargs["log_path"].replace("/",os.sep) if "log_path" in kwargs else default_log_path
-     if LLM_type == "OpenAI":
-         LLM = (
-             OpenAILLM(**kwargs["LLM"])
-             if "LLM" in kwargs
-             else OpenAILLM(model = "gpt-3.5-turbo-16k-0613",temperature=0.3,log_path=log_path)
-         )
-         return LLM
-

 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/DrawBounds.js DELETED
@@ -1,90 +0,0 @@
- import ALIGNMODE from '../utils/AlignConst.js';
- import AlignIn from '../../../plugins/utils/actions/AlignIn.js';
- import { GetBounds } from '../../../plugins/utils/bounds/GetBounds.js';
-
- const GetValue = Phaser.Utils.Objects.GetValue;
- const Group = Phaser.GameObjects.Group;
-
- var DrawBounds = function (graphics, config) {
-     var scene = graphics.scene;
-
-     var color, lineWidth;
-     var createTextCallback, createTextCallbackScope, textAlign;
-     if (typeof (config) === 'number') {
-         color = config;
-     } else {
-         color = GetValue(config, 'color');
-         lineWidth = GetValue(config, 'lineWidth');
-         var nameTextConfig = GetValue(config, 'name', false);
-         if (nameTextConfig) {
-             createTextCallback = GetValue(nameTextConfig, 'createTextCallback', DefaultCreateTextCallback);
-             createTextCallbackScope = GetValue(nameTextConfig, 'createTextCallbackScope', undefined);
-             textAlign = GetValue(nameTextConfig, 'align', 'left-top');
-             if (typeof (textAlign) === 'string') {
-                 textAlign = ALIGNMODE[textAlign];
-             }
-         }
-     }
-
-     if (color === undefined) {
-         color = 0xffffff;
-     }
-     if (lineWidth === undefined) {
-         lineWidth = 1;
-     }
-
-     if (createTextCallback && !graphics.children) {
-         graphics.children = new Group(scene);
-         graphics.once('destroy', function (graphics, fromScene) {
-             graphics.children.destroy(!fromScene);
-             graphics.children = undefined;
-         })
-         var graphicsClear = graphics.clear.bind(graphics);
-         graphics.clear = function () {
-             graphicsClear();
-             graphics.children.clear(false, true);
-         }
-     }
-
-     var children = this.getAllShownChildren([this]), child;
-     var nameText;
-     for (var i = 0, cnt = children.length; i < cnt; i++) {
-         child = children[i];
-         if (child.getBounds ||
-             ((child.width !== undefined) && (child.height !== undefined))
-         ) {
-             GlobRect = GetBounds(child, GlobRect);
-         } else {
-             continue;
-         }
-
-         if (color != null) {
-             graphics
-                 .lineStyle(lineWidth, color)
-                 .strokeRectShape(GlobRect);
-         }
-
-         if (child.name && createTextCallback) {
-             if (createTextCallbackScope) {
-                 nameText = createTextCallback.call(createTextCallbackScope, scene);
-             } else {
-                 nameText = createTextCallback(scene);
-             }
-             if (nameText) {
-                 nameText.setText(child.name);
-                 graphics.children.add(nameText);
-
-                 AlignIn(nameText, GlobRect.x, GlobRect.y, GlobRect.width, GlobRect.height, textAlign);
-             }
-         }
-     }
-     return this;
- }
-
- var DefaultCreateTextCallback = function (scene, child, childBoundsRect) {
-     return scene.add.text(0, 0, '');
- }
-
- var GlobRect = undefined;
-
- export default DrawBounds;

spaces/Alpaca233/SadTalker/src/face3d/options/train_options.py DELETED
@@ -1,53 +0,0 @@
- """This script contains the training options for Deep3DFaceRecon_pytorch
- """
-
- from .base_options import BaseOptions
- from util import util
-
- class TrainOptions(BaseOptions):
-     """This class includes training options.
-
-     It also includes shared options defined in BaseOptions.
-     """
-
-     def initialize(self, parser):
-         parser = BaseOptions.initialize(self, parser)
-         # dataset parameters
-         # for train
-         parser.add_argument('--data_root', type=str, default='./', help='dataset root')
-         parser.add_argument('--flist', type=str, default='datalist/train/masks.txt', help='list of mask names of training set')
-         parser.add_argument('--batch_size', type=int, default=32)
-         parser.add_argument('--dataset_mode', type=str, default='flist', help='chooses how datasets are loaded. [None | flist]')
-         parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
-         parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
-         parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
-         parser.add_argument('--preprocess', type=str, default='shift_scale_rot_flip', help='scaling and cropping of images at load time [shift_scale_rot_flip | shift_scale | shift | shift_rot_flip ]')
-         parser.add_argument('--use_aug', type=util.str2bool, nargs='?', const=True, default=True, help='whether use data augmentation')
-
-         # for val
-         parser.add_argument('--flist_val', type=str, default='datalist/val/masks.txt', help='list of mask names of val set')
-         parser.add_argument('--batch_size_val', type=int, default=32)
-
-         # visualization parameters
-         parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen')
-         parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
-
-         # network saving and loading parameters
-         parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
-         parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
-         parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq')
-         parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
-         parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
-         parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
-         parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
-         parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint')
-
-         # training parameters
-         parser.add_argument('--n_epochs', type=int, default=20, help='number of epochs with the initial learning rate')
-         parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
-         parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy. [linear | step | plateau | cosine]')
-         parser.add_argument('--lr_decay_epochs', type=int, default=10, help='multiply by a gamma every lr_decay_epochs epochs')
-
-         self.isTrain = True
-         return parser

spaces/Andy1621/uniformer_image_detection/exp/mask_rcnn_1x_hybrid_base/run.sh DELETED
@@ -1,10 +0,0 @@
- #!/usr/bin/env bash
-
- work_path=$(dirname $0)
- PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
- python -m torch.distributed.launch --nproc_per_node=8 \
-     tools/train.py ${work_path}/config.py \
-     --launcher pytorch \
-     --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_base_in1k.pth' \
-     --work-dir ${work_path}/ckpt \
-     2>&1 | tee -a ${work_path}/log.txt

spaces/Andy1621/uniformer_image_detection/mmdet/core/utils/__init__.py DELETED
@@ -1,7 +0,0 @@
- from .dist_utils import DistOptimizerHook, allreduce_grads, reduce_mean
- from .misc import mask2ndarray, multi_apply, unmap
-
- __all__ = [
-     'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
-     'unmap', 'mask2ndarray'
- ]

spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './nonlocal_r50-d8_512x512_40k_voc12aug.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py DELETED
@@ -1,7 +0,0 @@
- _base_ = [
-     '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
-     '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
- ]
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
- optimizer = dict(lr=0.02)
- lr_config = dict(min_lr=2e-4)

spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './psanet_r50-d8_512x1024_80k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

spaces/Ankush05/Newcode/app.py DELETED
@@ -1,74 +0,0 @@
- import streamlit as st
- import os
- import pandas as pd
-
- from streamlit_option_menu import option_menu
- from bardapi import Bard
- from getvalues import getValues
- from pymongo import MongoClient
- from transformers import pipeline, Conversation
-
-
- classifyr = pipeline("zero-shot-classification")
-
- convo = pipeline("conversational")
-
- # classifi = pipeline(model="facebook/bart-large-mnli")
-
- uri = os.environ["MONGO_CONNECTION_STRING"]
- client = MongoClient(uri, tlsCertificateKeyFile="database/cert.pem")
-
- db = client["myapp"]
-
- col = db["reminders"]
-
- bardkey = os.environ.get("BARD_API_KEY")
-
- bard = Bard(token=bardkey)
-
- def view_rem():
-     allrem = list(col.find())
-     remdata = pd.DataFrame(allrem)
-     st.dataframe(remdata)
-
-
- def Chatbot():
-     st.title("Chatbot")
-     if user_input := st.chat_input("Describe your goal. e.g: I want to achieve this goal in this time. Be as specific and explanatory as you can."):
-         bardans = bard.get_answer(user_input)['content']
-         anslist = bard.get_answer(f"Make a list of this answer: \n{bardans} \nfor this goal: \n{user_input}\n\nThe list should be in two section, section 1 for all the reminders to track called Daily Routine and section 2 for all information that should be consumed to achieve the goal and stay very focused and motivated with excitement and this section is called Notes")['content']
-         # result = classifyr(user_input, candidate_labels=["reminders", "notes"])
-         with st.chat_message("assistant"):
-             st.write(anslist)
-
-         # with st.chat_message("user"):
-         #     st.write(result["labels"][0])
-
-         # if ans["labels"][0] == "reminders":
-         #     values = getValues(query.lower())
-         #     with st.chat_message("assistant"):
-         #         st.write(values)
-         #     col.insert_one(values)
-
-         # elif ans["labels"][0] == "general conversation":
-         #     umsg = bard.get_answer(query)["content"]
-         #     with st.chat_message("assistant"):
-         #         st.write(umsg)
-
-         # elif ans["labels"][0] == "notes":
-         #     Notes = query.lower().replace(" create a new note", "").replace(" no new note", "")
-
-
- Chatbot()
-
-
- def Create_Reminder():
-     st.title("Create Reminder")
-     message = st.text_input("Share your plan of today")
-     time = str(st.time_input("Time"))
-     date = str(st.date_input("Date"))
-

spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/scripts/ui.sh DELETED
@@ -1,14 +0,0 @@
- python ui_main.py \
-     --img_file ./examples/celeba/img/ \
-     --mask_file ./examples/celeba/mask/ \
-     --results_dir ./results \
-     --model tc \
-     --coarse_or_refine refine \
-     --gpu_id 0 \
-     --no_shuffle \
-     --batch_size 1 \
-     --preprocess scale_shortside \
-     --mask_type 3 \
-     --load_size 512 \
-     --attn_G \
-     --add_noise

spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/hook.py DELETED
@@ -1,92 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- from annotator.uniformer.mmcv.utils import Registry, is_method_overridden
-
- HOOKS = Registry('hook')
-
-
- class Hook:
-     stages = ('before_run', 'before_train_epoch', 'before_train_iter',
-               'after_train_iter', 'after_train_epoch', 'before_val_epoch',
-               'before_val_iter', 'after_val_iter', 'after_val_epoch',
-               'after_run')
-
-     def before_run(self, runner):
-         pass
-
-     def after_run(self, runner):
-         pass
-
-     def before_epoch(self, runner):
-         pass
-
-     def after_epoch(self, runner):
-         pass
-
-     def before_iter(self, runner):
-         pass
-
-     def after_iter(self, runner):
-         pass
-
-     def before_train_epoch(self, runner):
-         self.before_epoch(runner)
-
-     def before_val_epoch(self, runner):
-         self.before_epoch(runner)
-
-     def after_train_epoch(self, runner):
-         self.after_epoch(runner)
-
-     def after_val_epoch(self, runner):
-         self.after_epoch(runner)
-
-     def before_train_iter(self, runner):
-         self.before_iter(runner)
-
-     def before_val_iter(self, runner):
-         self.before_iter(runner)
-
-     def after_train_iter(self, runner):
-         self.after_iter(runner)
-
-     def after_val_iter(self, runner):
-         self.after_iter(runner)
-
-     def every_n_epochs(self, runner, n):
-         return (runner.epoch + 1) % n == 0 if n > 0 else False
-
-     def every_n_inner_iters(self, runner, n):
-         return (runner.inner_iter + 1) % n == 0 if n > 0 else False
-
-     def every_n_iters(self, runner, n):
-         return (runner.iter + 1) % n == 0 if n > 0 else False
-
-     def end_of_epoch(self, runner):
-         return runner.inner_iter + 1 == len(runner.data_loader)
-
-     def is_last_epoch(self, runner):
-         return runner.epoch + 1 == runner._max_epochs
-
-     def is_last_iter(self, runner):
-         return runner.iter + 1 == runner._max_iters
-
-     def get_triggered_stages(self):
-         trigger_stages = set()
-         for stage in Hook.stages:
-             if is_method_overridden(stage, Hook, self):
-                 trigger_stages.add(stage)
-
-         # some methods will be triggered in multi stages
-         # use this dict to map method to stages.
-         method_stages_map = {
-             'before_epoch': ['before_train_epoch', 'before_val_epoch'],
-             'after_epoch': ['after_train_epoch', 'after_val_epoch'],
-             'before_iter': ['before_train_iter', 'before_val_iter'],
-             'after_iter': ['after_train_iter', 'after_val_iter'],
-         }
-
-         for method, map_stages in method_stages_map.items():
-             if is_method_overridden(method, Hook, self):
-                 trigger_stages.update(map_stages)
-
-         return [stage for stage in Hook.stages if stage in trigger_stages]

spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py DELETED
@@ -1,56 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- from ...dist_utils import master_only
- from ..hook import HOOKS
- from .base import LoggerHook
-
-
- @HOOKS.register_module()
- class WandbLoggerHook(LoggerHook):
-
-     def __init__(self,
-                  init_kwargs=None,
-                  interval=10,
-                  ignore_last=True,
-                  reset_flag=False,
-                  commit=True,
-                  by_epoch=True,
-                  with_step=True):
-         super(WandbLoggerHook, self).__init__(interval, ignore_last,
-                                               reset_flag, by_epoch)
-         self.import_wandb()
-         self.init_kwargs = init_kwargs
-         self.commit = commit
-         self.with_step = with_step
-
-     def import_wandb(self):
-         try:
-             import wandb
-         except ImportError:
-             raise ImportError(
-                 'Please run "pip install wandb" to install wandb')
-         self.wandb = wandb
-
-     @master_only
-     def before_run(self, runner):
-         super(WandbLoggerHook, self).before_run(runner)
-         if self.wandb is None:
-             self.import_wandb()
-         if self.init_kwargs:
-             self.wandb.init(**self.init_kwargs)
-         else:
-             self.wandb.init()
-
-     @master_only
-     def log(self, runner):
-         tags = self.get_loggable_tags(runner)
-         if tags:
-             if self.with_step:
-                 self.wandb.log(
-                     tags, step=self.get_iter(runner), commit=self.commit)
-             else:
-                 tags['global_step'] = self.get_iter(runner)
-                 self.wandb.log(tags, commit=self.commit)
-
-     @master_only
-     def after_run(self, runner):
-         self.wandb.join()

spaces/Anonymous-sub/Rerender/src/controller.py DELETED
@@ -1,136 +0,0 @@
- import gc
-
- import torch
- import torch.nn.functional as F
-
- from flow.flow_utils import flow_warp
-
- # AdaIn
-
-
- def calc_mean_std(feat, eps=1e-5):
-     # eps is a small value added to the variance to avoid divide-by-zero.
-     size = feat.size()
-     assert (len(size) == 4)
-     N, C = size[:2]
-     feat_var = feat.view(N, C, -1).var(dim=2) + eps
-     feat_std = feat_var.sqrt().view(N, C, 1, 1)
-     feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
-     return feat_mean, feat_std
-
-
- class AttentionControl():
-
-     def __init__(self, inner_strength, mask_period, cross_period, ada_period,
-                  warp_period):
-         self.step_store = self.get_empty_store()
-         self.cur_step = 0
-         self.total_step = 0
-         self.cur_index = 0
-         self.init_store = False
-         self.restore = False
-         self.update = False
-         self.flow = None
-         self.mask = None
-         self.restorex0 = False
-         self.updatex0 = False
-         self.inner_strength = inner_strength
-         self.cross_period = cross_period
-         self.mask_period = mask_period
-         self.ada_period = ada_period
-         self.warp_period = warp_period
-
-     @staticmethod
-     def get_empty_store():
-         return {
-             'first': [],
-             'previous': [],
-             'x0_previous': [],
-             'first_ada': []
-         }
-
-     def forward(self, context, is_cross: bool, place_in_unet: str):
-         cross_period = (self.total_step * self.cross_period[0],
-                         self.total_step * self.cross_period[1])
-         if not is_cross and place_in_unet == 'up':
-             if self.init_store:
-                 self.step_store['first'].append(context.detach())
-                 self.step_store['previous'].append(context.detach())
-             if self.update:
-                 tmp = context.clone().detach()
-             if self.restore and self.cur_step >= cross_period[0] and \
-                     self.cur_step <= cross_period[1]:
-                 context = torch.cat(
-                     (self.step_store['first'][self.cur_index],
-                      self.step_store['previous'][self.cur_index]),
-                     dim=1).clone()
-             if self.update:
-                 self.step_store['previous'][self.cur_index] = tmp
-             self.cur_index += 1
-         return context
-
-     def update_x0(self, x0):
-         if self.init_store:
-             self.step_store['x0_previous'].append(x0.detach())
-             style_mean, style_std = calc_mean_std(x0.detach())
-             self.step_store['first_ada'].append(style_mean.detach())
-             self.step_store['first_ada'].append(style_std.detach())
-         if self.updatex0:
-             tmp = x0.clone().detach()
-         if self.restorex0:
-             if self.cur_step >= self.total_step * self.ada_period[0] and \
-                     self.cur_step <= self.total_step * self.ada_period[1]:
-                 x0 = F.instance_norm(x0) * self.step_store['first_ada'][
-                     2 * self.cur_step + 1] + self.step_store['first_ada'][2 * self.cur_step]
-             if self.cur_step >= self.total_step * self.warp_period[0] and \
-                     self.cur_step <= self.total_step * self.warp_period[1]:
-                 pre = self.step_store['x0_previous'][self.cur_step]
-                 x0 = flow_warp(pre, self.flow, mode='nearest') * self.mask + (
-                     1 - self.mask) * x0
-         if self.updatex0:
-             self.step_store['x0_previous'][self.cur_step] = tmp
-         return x0
-
-     def set_warp(self, flow, mask):
-         self.flow = flow.clone()
-         self.mask = mask.clone()
-
-     def __call__(self, context, is_cross: bool, place_in_unet: str):
-         context = self.forward(context, is_cross, place_in_unet)
-         return context
-
-     def set_step(self, step):
-         self.cur_step = step
-
-     def set_total_step(self, total_step):
-         self.total_step = total_step
-         self.cur_index = 0
-
-     def clear_store(self):
-         del self.step_store
-         torch.cuda.empty_cache()
-         gc.collect()
-         self.step_store = self.get_empty_store()
-
-     def set_task(self, task, restore_step=1.0):
-         self.init_store = False
-         self.restore = False
-         self.update = False
-         self.cur_index = 0
-         self.restore_step = restore_step
-         self.updatex0 = False
-         self.restorex0 = False
-         if 'initfirst' in task:
-             self.init_store = True
-             self.clear_store()
-         if 'updatestyle' in task:
-             self.update = True
-         if 'keepstyle' in task:
-             self.restore = True
-         if 'updatex0' in task:
-             self.updatex0 = True
-         if 'keepx0' in task:
-             self.restorex0 = True

spaces/Anthony7906/MengHuiMXD_GPT/run_macOS.command DELETED
@@ -1,31 +0,0 @@
- #!/bin/bash
-
- # Get the directory containing this script
- script_dir=$(dirname "$(readlink -f "$0")")
-
- # Change the working directory to the script's directory
- cd "$script_dir" || exit
-
- # Check whether the Git repository has updates
- git remote update
- pwd
-
- if ! git status -uno | grep 'up to date' > /dev/null; then
-     # If there are updates, stop the currently running server
-     pkill -f ChuanhuChatbot.py
-
-     # Pull the latest changes
-     git pull
-
-     # Install dependencies
-     pip3 install -r requirements.txt
-
-     # Restart the server
-     nohup python3 ChuanhuChatbot.py &
- fi
-
- # Check whether ChuanhuChatbot.py is running
- if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
-     # If it is not running, start the server
-     nohup python3 ChuanhuChatbot.py &
- fi

spaces/Ataturk-Chatbot/HuggingFaceChat/venv/bin/Activate.ps1 DELETED
@@ -1,247 +0,0 @@
- <#
- .Synopsis
- Activate a Python virtual environment for the current PowerShell session.
-
- .Description
- Pushes the python executable for a virtual environment to the front of the
- $Env:PATH environment variable and sets the prompt to signify that you are
- in a Python virtual environment. Makes use of the command line switches as
- well as the `pyvenv.cfg` file values present in the virtual environment.
-
- .Parameter VenvDir
- Path to the directory that contains the virtual environment to activate. The
- default value for this is the parent of the directory that the Activate.ps1
- script is located within.
-
- .Parameter Prompt
- The prompt prefix to display when this virtual environment is activated. By
- default, this prompt is the name of the virtual environment folder (VenvDir)
- surrounded by parentheses and followed by a single space (ie. '(.venv) ').
-
- .Example
- Activate.ps1
- Activates the Python virtual environment that contains the Activate.ps1 script.
-
- .Example
- Activate.ps1 -Verbose
- Activates the Python virtual environment that contains the Activate.ps1 script,
- and shows extra information about the activation as it executes.
-
- .Example
- Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
- Activates the Python virtual environment located in the specified location.
-
- .Example
- Activate.ps1 -Prompt "MyPython"
- Activates the Python virtual environment that contains the Activate.ps1 script,
- and prefixes the current prompt with the specified string (surrounded in
- parentheses) while the virtual environment is active.
-
- .Notes
- On Windows, it may be required to enable this Activate.ps1 script by setting the
- execution policy for the user. You can do this by issuing the following PowerShell
- command:
-
- PS C:\> Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser
-
- For more information on Execution Policies:
- https://go.microsoft.com/fwlink/?LinkID=135170
-
- #>
- Param(
-     [Parameter(Mandatory = $false)]
-     [String]
-     $VenvDir,
-     [Parameter(Mandatory = $false)]
-     [String]
-     $Prompt
- )
-
- <# Function declarations --------------------------------------------------- #>
-
- <#
- .Synopsis
- Remove all shell session elements added by the Activate script, including the
- addition of the virtual environment's Python executable from the beginning of
- the PATH variable.
-
- .Parameter NonDestructive
- If present, do not remove this function from the global namespace for the
- session.
-
- #>
- function global:deactivate ([switch]$NonDestructive) {
-     # Revert to original values
-
-     # The prior prompt:
-     if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
-         Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
-         Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
-     }
-
-     # The prior PYTHONHOME:
-     if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
-         Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
-         Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
-     }
-
-     # The prior PATH:
-     if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
-         Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
-         Remove-Item -Path Env:_OLD_VIRTUAL_PATH
-     }
-
-     # Just remove the VIRTUAL_ENV altogether:
-     if (Test-Path -Path Env:VIRTUAL_ENV) {
-         Remove-Item -Path env:VIRTUAL_ENV
-     }
-
-     # Just remove VIRTUAL_ENV_PROMPT altogether.
-     if (Test-Path -Path Env:VIRTUAL_ENV_PROMPT) {
-         Remove-Item -Path env:VIRTUAL_ENV_PROMPT
-     }
-
-     # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
-     if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
-         Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
-     }
-
-     # Leave deactivate function in the global namespace if requested:
-     if (-not $NonDestructive) {
-         Remove-Item -Path function:deactivate
-     }
- }
-
- <#
- .Description
- Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
- given folder, and returns them in a map.
-
- For each line in the pyvenv.cfg file, if that line can be parsed into exactly
- two strings separated by `=` (with any amount of whitespace surrounding the =)
- then it is considered a `key = value` line. The left hand string is the key,
- the right hand is the value.
-
- If the value starts with a `'` or a `"` then the first and last character is
- stripped from the value before being captured.
-
- .Parameter ConfigDir
- Path to the directory that contains the `pyvenv.cfg` file.
- #>
- function Get-PyVenvConfig(
-     [String]
-     $ConfigDir
- ) {
-     Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
-
-     # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
-     $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
-
-     # An empty map will be returned if no config file is found.
-     $pyvenvConfig = @{ }
-
-     if ($pyvenvConfigPath) {
-
-         Write-Verbose "File exists, parse `key = value` lines"
-         $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
-
-         $pyvenvConfigContent | ForEach-Object {
-             $keyval = $PSItem -split "\s*=\s*", 2
-             if ($keyval[0] -and $keyval[1]) {
-                 $val = $keyval[1]
-
-                 # Remove extraneous quotations around a string value.
-                 if ("'""".Contains($val.Substring(0, 1))) {
-                     $val = $val.Substring(1, $val.Length - 2)
-                 }
-
-                 $pyvenvConfig[$keyval[0]] = $val
-                 Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
-             }
-         }
-     }
-     return $pyvenvConfig
- }
-
-
- <# Begin Activate script --------------------------------------------------- #>
-
- # Determine the containing directory of this script
- $VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
- $VenvExecDir = Get-Item -Path $VenvExecPath
-
- Write-Verbose "Activation script is located in path: '$VenvExecPath'"
- Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)"
- Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)"
-
- # Set values required in priority: CmdLine, ConfigFile, Default
- # First, get the location of the virtual environment, it might not be
- # VenvExecDir if specified on the command line.
- if ($VenvDir) {
-     Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
- }
- else {
-     Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
-     $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
-     Write-Verbose "VenvDir=$VenvDir"
- }
-
- # Next, read the `pyvenv.cfg` file to determine any required value such
- # as `prompt`.
- $pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
-
- # Next, set the prompt from the command line, or the config file, or
- # just use the name of the virtual environment folder.
- if ($Prompt) {
-     Write-Verbose "Prompt specified as argument, using '$Prompt'"
- }
- else {
-     Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
-     if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
-         Write-Verbose "  Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
-         $Prompt = $pyvenvCfg['prompt'];
-     }
-     else {
-         Write-Verbose "  Setting prompt based on parent's directory's name. (Is the directory name passed to venv module when creating the virtual environment)"
-         Write-Verbose "  Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
-         $Prompt = Split-Path -Path $venvDir -Leaf
-     }
- }
-
- Write-Verbose "Prompt = '$Prompt'"
- Write-Verbose "VenvDir='$VenvDir'"
-
- # Deactivate any currently active virtual environment, but leave the
- # deactivate function in place.
- deactivate -nondestructive
-
- # Now set the environment variable VIRTUAL_ENV, used by many tools to determine
- # that there is an activated venv.
- $env:VIRTUAL_ENV = $VenvDir
-
- if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
-
-     Write-Verbose "Setting prompt to '$Prompt'"
-
-     # Set the prompt to include the env name
-     # Make sure _OLD_VIRTUAL_PROMPT is global
-     function global:_OLD_VIRTUAL_PROMPT { "" }
-     Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
-     New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
-
-     function global:prompt {
-         Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
-         _OLD_VIRTUAL_PROMPT
-     }
-     $env:VIRTUAL_ENV_PROMPT = $Prompt
- }
-
- # Clear PYTHONHOME
- if (Test-Path -Path Env:PYTHONHOME) {
-     Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
-     Remove-Item -Path Env:PYTHONHOME
- }
-
- # Add the venv to the PATH
- Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
- $Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/progress_bars.py DELETED
@@ -1,68 +0,0 @@
- import functools
- from typing import Callable, Generator, Iterable, Iterator, Optional, Tuple
-
- from pip._vendor.rich.progress import (
-     BarColumn,
-     DownloadColumn,
-     FileSizeColumn,
-     Progress,
-     ProgressColumn,
-     SpinnerColumn,
-     TextColumn,
-     TimeElapsedColumn,
-     TimeRemainingColumn,
-     TransferSpeedColumn,
- )
-
- from pip._internal.utils.logging import get_indentation
-
- DownloadProgressRenderer = Callable[[Iterable[bytes]], Iterator[bytes]]
-
-
- def _rich_progress_bar(
-     iterable: Iterable[bytes],
-     *,
-     bar_type: str,
-     size: int,
- ) -> Generator[bytes, None, None]:
-     assert bar_type == "on", "This should only be used in the default mode."
-
-     if not size:
-         total = float("inf")
-         columns: Tuple[ProgressColumn, ...] = (
-             TextColumn("[progress.description]{task.description}"),
-             SpinnerColumn("line", speed=1.5),
-             FileSizeColumn(),
-             TransferSpeedColumn(),
-             TimeElapsedColumn(),
-         )
-     else:
-         total = size
-         columns = (
-             TextColumn("[progress.description]{task.description}"),
-             BarColumn(),
-             DownloadColumn(),
-             TransferSpeedColumn(),
-             TextColumn("eta"),
-             TimeRemainingColumn(),
-         )
-
-     progress = Progress(*columns, refresh_per_second=30)
-     task_id = progress.add_task(" " * (get_indentation() + 2), total=total)
-     with progress:
-         for chunk in iterable:
-             yield chunk
-             progress.update(task_id, advance=len(chunk))
-
-
- def get_download_progress_renderer(
-     *, bar_type: str, size: Optional[int] = None
- ) -> DownloadProgressRenderer:
-     """Get an object that can be used to render the download progress.
-
-     Returns a callable, that takes an iterable to "wrap".
-     """
-     if bar_type == "on":
-         return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size)
-     else:
-         return iter  # no-op, when passed an iterator

spaces/AutoLLM/AutoAgents/autoagents/models/custom.py DELETED
@@ -1,33 +0,0 @@
- import requests
-
- from langchain.llms.base import LLM
-
-
- class CustomLLM(LLM):
-     @property
-     def _llm_type(self) -> str:
-         return "custom"
-
-     def _call(self, prompt: str, stop=None) -> str:
-         r = requests.post(
-             "http://localhost:8000/v1/chat/completions",
-             json={
-                 "model": "283-vicuna-7b",
-                 "messages": [{"role": "user", "content": prompt}],
-                 "stop": stop
-             },
-         )
-         result = r.json()
-         return result["choices"][0]["message"]["content"]
-
-     async def _acall(self, prompt: str, stop=None) -> str:
-         r = requests.post(
-             "http://localhost:8000/v1/chat/completions",
-             json={
-                 "model": "283-vicuna-7b",
-                 "messages": [{"role": "user", "content": prompt}],
-                 "stop": stop
-             },
-         )
-         result = r.json()
-         return result["choices"][0]["message"]["content"]

spaces/Avatarize/ECON/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: ECON
- emoji: 🏢
- colorFrom: blue
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Banbri/zcvzcv/src/app/engine/render.ts DELETED
@@ -1,400 +0,0 @@
- "use server"
-
- import { v4 as uuidv4 } from "uuid"
- import Replicate from "replicate"
-
- import { RenderRequest, RenderedScene, RenderingEngine } from "@/types"
- import { generateSeed } from "@/lib/generateSeed"
- import { sleep } from "@/lib/sleep"
-
- const renderingEngine = `${process.env.RENDERING_ENGINE || ""}` as RenderingEngine
-
- // TODO: we should split Hugging Face and Replicate backends into separate files
- const huggingFaceToken = `${process.env.AUTH_HF_API_TOKEN || ""}`
- const huggingFaceInferenceEndpointUrl = `${process.env.RENDERING_HF_INFERENCE_ENDPOINT_URL || ""}`
- const huggingFaceInferenceApiBaseModel = `${process.env.RENDERING_HF_INFERENCE_API_BASE_MODEL || ""}`
- const huggingFaceInferenceApiRefinerModel = `${process.env.RENDERING_HF_INFERENCE_API_REFINER_MODEL || ""}`
-
- const replicateToken = `${process.env.AUTH_REPLICATE_API_TOKEN || ""}`
- const replicateModel = `${process.env.RENDERING_REPLICATE_API_MODEL || ""}`
- const replicateModelVersion = `${process.env.RENDERING_REPLICATE_API_MODEL_VERSION || ""}`
-
- const videochainToken = `${process.env.AUTH_VIDEOCHAIN_API_TOKEN || ""}`
- const videochainApiUrl = `${process.env.RENDERING_VIDEOCHAIN_API_URL || ""}`
-
- export async function newRender({
-   prompt,
-   // negativePrompt,
-   width,
-   height,
-   withCache
- }: {
-   prompt: string
-   // negativePrompt: string[]
-   width: number
-   height: number
-   withCache: boolean
- }) {
-   // throw new Error("Planned maintenance")
-
-   if (!prompt) {
-     const error = `cannot call the rendering API without a prompt, aborting..`
-     console.error(error)
-     throw new Error(error)
-   }
-
-   let defaultResult: RenderedScene = {
-     renderId: "",
-     status: "error",
-     assetUrl: "",
-     alt: prompt || "",
-     maskUrl: "",
-     error: "failed to fetch the data",
-     segments: []
-   }
-
-   const nbInferenceSteps = 30
-   const guidanceScale = 9
-
-   try {
-     if (renderingEngine === "REPLICATE") {
-       if (!replicateToken) {
-         throw new Error(`you need to configure your REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
-       }
-       if (!replicateModel) {
-         throw new Error(`you need to configure your REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
-       }
-       if (!replicateModelVersion) {
-         throw new Error(`you need to configure your REPLICATE_API_MODEL_VERSION in order to use the REPLICATE rendering engine`)
-       }
-       const replicate = new Replicate({ auth: replicateToken })
-
-       const seed = generateSeed()
-       const prediction = await replicate.predictions.create({
-         version: replicateModelVersion,
-         input: {
-           prompt: [
-             "beautiful",
-             // "intricate details",
-             prompt,
-             "award winning",
-             "high resolution"
-           ].join(", "),
-           width,
-           height,
-           seed
-         }
-       })
-
-       // no need to reply straight away as images take time to generate, this isn't instantaneous
-       // also our friends at Replicate won't like it if we spam them with requests
-       await sleep(4000)
-
-       return {
-         renderId: prediction.id,
-         status: "pending",
-         assetUrl: "",
-         alt: prompt,
-         error: prediction.error,
-         maskUrl: "",
-         segments: []
-       } as RenderedScene
-     } else if (renderingEngine === "INFERENCE_ENDPOINT" || renderingEngine === "INFERENCE_API") {
-       if (!huggingFaceToken) {
-         throw new Error(`you need to configure your HF_API_TOKEN in order to use the ${renderingEngine} rendering engine`)
-       }
-       if (renderingEngine === "INFERENCE_ENDPOINT" && !huggingFaceInferenceEndpointUrl) {
-         throw new Error(`you need to configure your RENDERING_HF_INFERENCE_ENDPOINT_URL in order to use the INFERENCE_ENDPOINT rendering engine`)
-       }
-       if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiBaseModel) {
-         throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_BASE_MODEL in order to use the INFERENCE_API rendering engine`)
-       }
-       if (renderingEngine === "INFERENCE_API" && !huggingFaceInferenceApiRefinerModel) {
-         throw new Error(`you need to configure your RENDERING_HF_INFERENCE_API_REFINER_MODEL in order to use the INFERENCE_API rendering engine`)
-       }
-
-       const baseModelUrl = renderingEngine === "INFERENCE_ENDPOINT"
-         ? huggingFaceInferenceEndpointUrl
-         : `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiBaseModel}`
-
-       const positivePrompt = [
-         "beautiful",
-         // "intricate details",
-         prompt,
-         "award winning",
-         "high resolution"
-       ].join(", ")
-
-       const res = await fetch(baseModelUrl, {
-         method: "POST",
-         headers: {
-           "Content-Type": "application/json",
-           Authorization: `Bearer ${huggingFaceToken}`,
-         },
-         body: JSON.stringify({
-           inputs: positivePrompt,
-           parameters: {
-             num_inference_steps: nbInferenceSteps,
-             guidance_scale: guidanceScale,
-             width,
-             height,
-           },
-
-           // this doesn't do what you think it does
-           use_cache: false, // withCache,
-         }),
-         cache: "no-store",
-         // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-         // next: { revalidate: 1 }
-       })
-
-       // Recommendation: handle errors
-       if (res.status !== 200) {
-         const content = await res.text()
-         console.error(content)
-         // This will activate the closest `error.js` Error Boundary
-         throw new Error('Failed to fetch data')
-       }
-
-       const blob = await res.arrayBuffer()
-
-       const contentType = res.headers.get('content-type')
-
-       let assetUrl = `data:${contentType};base64,${Buffer.from(blob).toString('base64')}`
-
-       // note: there is no "refiner" step yet for custom inference endpoint
-       // you probably don't need it anyway, as you probably want to deploy an all-in-one model instead for perf reasons
-
-       if (renderingEngine === "INFERENCE_API") {
-         try {
-           const refinerModelUrl = `https://api-inference.huggingface.co/models/${huggingFaceInferenceApiRefinerModel}`
-
-           const res = await fetch(refinerModelUrl, {
-             method: "POST",
-             headers: {
-               "Content-Type": "application/json",
-               Authorization: `Bearer ${huggingFaceToken}`,
-             },
-             body: JSON.stringify({
-               inputs: Buffer.from(blob).toString('base64'),
-               parameters: {
-                 prompt: positivePrompt,
-                 num_inference_steps: nbInferenceSteps,
-                 guidance_scale: guidanceScale,
-                 width,
-                 height,
-               },
-
-               // this doesn't do what you think it does
-               use_cache: false, // withCache,
-             }),
-             cache: "no-store",
-             // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-             // next: { revalidate: 1 }
-           })
-
-           // Recommendation: handle errors
-           if (res.status !== 200) {
-             const content = await res.json()
-             // if (content.error.include("currently loading")) {
-             //   console.log("refiner isn't ready yet")
-             throw new Error(content?.error || 'Failed to fetch data')
-           }
-
-           const refinedBlob = await res.arrayBuffer()
-
-           const contentType = res.headers.get('content-type')
-
-           assetUrl = `data:${contentType};base64,${Buffer.from(refinedBlob).toString('base64')}`
-
-         } catch (err) {
-           console.log(`Refiner step failed, but this is not a blocker. Error details: ${err}`)
-         }
-       }
-
-       return {
-         renderId: uuidv4(),
-         status: "completed",
-         assetUrl,
-         alt: prompt,
-         error: "",
-         maskUrl: "",
-         segments: []
-       } as RenderedScene
-     } else {
-       const res = await fetch(`${videochainApiUrl}${videochainApiUrl.endsWith("/") ? "" : "/"}render`, {
-         method: "POST",
-         headers: {
-           Accept: "application/json",
-           "Content-Type": "application/json",
-           Authorization: `Bearer ${videochainToken}`,
-         },
-         body: JSON.stringify({
-           prompt,
-           // negativePrompt, unused for now
-           nbFrames: 1,
-           nbSteps: nbInferenceSteps, // 20 = fast, 30 = better, 50 = best
-           actionnables: [], // ["text block"],
-           segmentation: "disabled", // "firstframe", // one day we will remove this param, to make it automatic
-           width,
-           height,
-
-           // no need to upscale right now as we generate tiny panels
-           // maybe later we can provide an "export" button to PDF
-           // unfortunately there are too many requests for upscaling,
-           // the server is always down
-           upscalingFactor: 1, // 2,
-
-           // analyzing doesn't work yet, it seems..
-           analyze: false, // analyze: true,
-
-           cache: "ignore"
-         } as Partial<RenderRequest>),
-         cache: 'no-store',
-         // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-         // next: { revalidate: 1 }
-       })
-
-       if (res.status !== 200) {
-         throw new Error('Failed to fetch data')
-       }
-
-       const response = (await res.json()) as RenderedScene
-
-       return response
-     }
-   } catch (err) {
-     console.error(err)
-     return defaultResult
-   }
- }
-
- export async function getRender(renderId: string) {
-   if (!renderId) {
-     const error = `cannot call the rendering API without a renderId, aborting..`
-     console.error(error)
-     throw new Error(error)
-   }
-
-   let defaultResult: RenderedScene = {
-     renderId: "",
-     status: "pending",
-     assetUrl: "",
-     alt: "",
-     maskUrl: "",
-     error: "failed to fetch the data",
-     segments: []
-   }
-
-   try {
-     if (renderingEngine === "REPLICATE") {
-       if (!replicateToken) {
-         throw new Error(`you need to configure your AUTH_REPLICATE_API_TOKEN in order to use the REPLICATE rendering engine`)
-       }
-       if (!replicateModel) {
-         throw new Error(`you need to configure your RENDERING_REPLICATE_API_MODEL in order to use the REPLICATE rendering engine`)
-       }
-
-       const res = await fetch(`https://api.replicate.com/v1/predictions/${renderId}`, {
-         method: "GET",
-         headers: {
-           Authorization: `Token ${replicateToken}`,
-         },
-         cache: 'no-store',
-         // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-         // next: { revalidate: 1 }
-       })
-
-       // Recommendation: handle errors
-       if (res.status !== 200) {
-         // This will activate the closest `error.js` Error Boundary
-         throw new Error('Failed to fetch data')
-       }
-
-       const response = (await res.json()) as any
-
-       return {
-         renderId,
-         status: response?.error ? "error" : response?.status === "succeeded" ? "completed" : "pending",
-         assetUrl: `${response?.output || ""}`,
-         alt: `${response?.input?.prompt || ""}`,
-         error: `${response?.error || ""}`,
-         maskUrl: "",
-         segments: []
-       } as RenderedScene
-     } else {
-       const res = await fetch(`${videochainApiUrl}/render/${renderId}`, {
-         method: "GET",
-         headers: {
-           Accept: "application/json",
-           "Content-Type": "application/json",
-           Authorization: `Bearer ${videochainToken}`,
-         },
-         cache: 'no-store',
-         // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-         // next: { revalidate: 1 }
-       })
-
-       if (res.status !== 200) {
-         throw new Error('Failed to fetch data')
-       }
-
-       const response = (await res.json()) as RenderedScene
-       return response
-     }
-   } catch (err) {
-     console.error(err)
-     defaultResult.status = "error"
-     defaultResult.error = `${err}`
-     return defaultResult
-   }
- }
-
- export async function upscaleImage(image: string): Promise<{
-   assetUrl: string
-   error: string
- }> {
-   if (!image) {
-     const error = `cannot call the rendering API without an image, aborting..`
-     console.error(error)
-     throw new Error(error)
-   }
-
-   let defaultResult = {
-     assetUrl: "",
-     error: "failed to fetch the data",
-   }
-
-   try {
-     const res = await fetch(`${videochainApiUrl}/upscale`, {
-       method: "POST",
-       headers: {
-         Accept: "application/json",
-         "Content-Type": "application/json",
-         Authorization: `Bearer ${videochainToken}`,
-       },
-       cache: 'no-store',
-       body: JSON.stringify({ image, factor: 3 })
-       // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
-       // next: { revalidate: 1 }
-     })
-
-     if (res.status !== 200) {
-       throw new Error('Failed to fetch data')
-     }
-
-     const response = (await res.json()) as {
-       assetUrl: string
-       error: string
-     }
-     return response
-   } catch (err) {
-     console.error(err)
-     return defaultResult
-   }
- }

spaces/Bart92/RVC_HF/tools/infer_cli.py DELETED
@@ -1,67 +0,0 @@
- import argparse
- import os
- import sys
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from dotenv import load_dotenv
- from scipy.io import wavfile
-
- from configs.config import Config
- from infer.modules.vc.modules import VC
-
- ####
- # USAGE
- #
- # In your Terminal or CMD or whatever
-
-
- def arg_parse() -> tuple:
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--f0up_key", type=int, default=0)
-     parser.add_argument("--input_path", type=str, help="input path")
-     parser.add_argument("--index_path", type=str, help="index path")
-     parser.add_argument("--f0method", type=str, default="harvest", help="harvest or pm")
-     parser.add_argument("--opt_path", type=str, help="opt path")
-     parser.add_argument("--model_name", type=str, help="store in assets/weight_root")
-     parser.add_argument("--index_rate", type=float, default=0.66, help="index rate")
-     parser.add_argument("--device", type=str, help="device")
-     parser.add_argument("--is_half", type=bool, help="use half -> True")
-     parser.add_argument("--filter_radius", type=int, default=3, help="filter radius")
-     parser.add_argument("--resample_sr", type=int, default=0, help="resample sr")
-     parser.add_argument("--rms_mix_rate", type=float, default=1, help="rms mix rate")
-     parser.add_argument("--protect", type=float, default=0.33, help="protect")
-
-     args = parser.parse_args()
-     sys.argv = sys.argv[:1]
-
-     return args
-
-
- def main():
-     load_dotenv()
-     args = arg_parse()
-     config = Config()
-     config.device = args.device if args.device else config.device
-     config.is_half = args.is_half if args.is_half else config.is_half
-     vc = VC(config)
-     vc.get_vc(args.model_name)
-     _, wav_opt = vc.vc_single(
-         0,
-         args.input_path,
-         args.f0up_key,
-         None,
-         args.f0method,
-         args.index_path,
-         None,
-         args.index_rate,
-         args.filter_radius,
-         args.resample_sr,
-         args.rms_mix_rate,
-         args.protect,
-     )
-     wavfile.write(args.opt_path, wav_opt[0], wav_opt[1])
-
-
- if __name__ == "__main__":
-     main()

spaces/Bavesh/Oral_Cancer_Detection/app.py DELETED
@@ -1,209 +0,0 @@
-
- import numpy as np
- import pandas as pd
- import tensorflow as tf
- from tensorflow import keras
- from keras.models import Sequential
- from keras.layers import Dense
- from keras.layers import Dropout
- from keras.layers import Flatten
- from keras.constraints import maxnorm
- from tensorflow.keras.optimizers import SGD
- from keras.layers.convolutional import Conv2D
- from keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPool2D, BatchNormalization
- from keras.utils import np_utils
- import tensorflow as tf
- from keras.preprocessing.image import ImageDataGenerator
- from tensorflow.keras.preprocessing import image_dataset_from_directory
- from tensorflow.keras.preprocessing import image
- from tensorflow.keras.applications.vgg19 import VGG19
- import keras
- from PIL import Image
- import matplotlib.pyplot as plt
- import seaborn
- from sklearn.metrics import confusion_matrix, classification_report
- import os
- import cv2
- from skimage.transform import resize
- import streamlit as st
-
- def get_output_layers(net):
-
-     layer_names = net.getLayerNames()
-
-     output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
-
-     return output_layers
-
- # function to draw bounding box on the detected object with class name
- def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h, COLORS):
-
-     label = f'cancer:{confidence}'
-
-     color = COLORS[class_id]
-
-     cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
-
-     cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
-
-     # plt.imshow(img)
-     # plt.show()
-
- def detection_inference(image, scale=1/255, image_size=416, conf_threshold=0.1, nms_threshold=0.4):
-     Width = image.shape[1]
-     Height = image.shape[0]
-
-     net = cv2.dnn.readNet('yolov4-custom_best.weights', 'yolov4-custom.cfg')
-     COLORS = np.random.uniform(0, 255, size=(1, 3))
-
-     blob = cv2.dnn.blobFromImage(image, scale, (image_size, image_size), (0, 0, 0), True, crop=False)
-     net.setInput(blob)
-
-     outs = net.forward(get_output_layers(net))
-
-     class_ids = []
-     confidences = []
-     boxes = []
-
-     for out in outs:
-         for detection in out:
-             scores = detection[5:]
-             class_id = np.argmax(scores)
-             confidence = scores[class_id]
-             if confidence > 0.1:
-                 center_x = int(detection[0] * Width)
-                 center_y = int(detection[1] * Height)
-                 w = int(detection[2] * Width)
-                 h = int(detection[3] * Height)
-                 x = center_x - w / 2
-                 y = center_y - h / 2
-                 class_ids.append(class_id)
-                 confidences.append(float(confidence))
-                 boxes.append([x, y, w, h])
-
-     indices = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
-
-     for i in indices:
-         i = i[0]
-         box = boxes[i]
-         x = box[0]
-         y = box[1]
-         w = box[2]
-         h = box[3]
-
-         draw_bounding_box(image, class_ids[i], confidences[i], round(x), round(y), round(x + w), round(y + h), COLORS)
-
-     image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-     plt.imshow(image)
-     plt.show()
-
-     return image
-     # st.image(image, caption='Object detection output', use_column_width=True)
-
- def _predict(img, model):
-     m = keras.models.load_model(model)
-     img2 = img.resize((224, 224))
-
-     image_array = np.asarray(img2)
-     new_one = image_array.reshape((1, 224, 224, 3))
-
-     y_pred = m(new_one)
-     print(y_pred)
-     val = np.argmax(y_pred, axis=1)
-     return y_pred, val
-
- @tf.custom_gradient
- def guidedRelu(x):
-     def grad(dy):
-         return tf.cast(dy > 0, "float32") * tf.cast(x > 0, "float32") * dy
-     return tf.nn.relu(x), grad
-
- def gradcam(img, model):
-     m = keras.models.load_model(model)
-     LAYER_NAME = 'block5_conv4'
-     gb_model = tf.keras.models.Model(
-         inputs=[m.inputs],
-         outputs=[m.get_layer(LAYER_NAME).output]
-     )
-     layer_dict = [layer for layer in gb_model.layers[1:] if hasattr(layer, 'activation')]
-
-     for layer in layer_dict:
-         if layer.activation == tf.keras.activations.relu:
-             layer.activation = guidedRelu
-
-     img2 = img.resize((224, 224))
-
-     image_array = np.asarray(img2)
-     print(image_array.shape)
-     new_one = image_array.reshape((1, 224, 224, 3))
-
-     with tf.GradientTape() as tape:
-         inputs = tf.cast(new_one, tf.float32)
-         tape.watch(inputs)
-         outputs = gb_model(inputs)[0]
-         grads = tape.gradient(outputs, inputs)[0]
-
-     weights = tf.reduce_mean(grads, axis=(0, 1))
-     grad_cam = np.ones(outputs.shape[0:2], dtype=np.float32)
-     for i, w in enumerate(weights):
-         grad_cam += w * outputs[:, :, i]
-
-     grad_cam_img = cv2.resize(grad_cam.numpy(), (img.size[0], img.size[1]))
-     grad_cam_img = np.maximum(grad_cam_img, 0)
-     heatmap = (grad_cam_img - grad_cam_img.min()) / (grad_cam_img.max() - grad_cam_img.min())
-     grad_cam_img = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
-     output_image = cv2.addWeighted(np.asarray(img).astype('uint8'), 1, grad_cam_img, 0.4, 0)
-
-     output_img = Image.fromarray(output_image)
-
-     st.image(output_img, caption='Class Activation Visualization', use_column_width=True)
-
-     plt.imshow(output_image)
-     plt.axis("off")
-     plt.show()
-
-     # guided_back_prop = grads
-     # guided_cam = np.maximum(grad_cam, 0)
-     # guided_cam = guided_cam / np.max(guided_cam)  # scale 0 to 1.0
-     # guided_cam = resize(guided_cam, (224, 224), preserve_range=True)
-
-     # # pointwise multiplication of guided backprop and grad CAM
-     # gd_gb = np.dstack((
-     #     guided_back_prop[:, :, 0] * guided_cam,
-     #     guided_back_prop[:, :, 1] * guided_cam,
-     #     guided_back_prop[:, :, 2] * guided_cam,
-     # ))
-     # plt.imshow(gd_gb)
-     # plt.axis("off")
-     # plt.show()
-
- uploaded_file = st.file_uploader(
-     "Choose an image of your mouth", type=['jpg', 'jpeg', 'png'])
-
- if uploaded_file is not None:
-     img = Image.open(uploaded_file).convert('RGB')
-     cv_img = np.array(img)
-     cv_img = cv2.cvtColor(cv_img, cv2.COLOR_RGB2BGR)
-     # img2 = Image.open('test.jpg')
-     st.image(img, caption='Uploaded file of your mouth', use_column_width=True)
-
-     # similarity = ssim(img, img2)
-     # st.write("")
-     # st.write(f'This is {similarity * 100}% histopathological image')
-
-     # if similarity >= 0.85:
-     st.write("")
-     st.write("Classifying...")
-
-     y_pred, val = _predict(img, 'oral cancer-vggg19.h5')
-     if val == 0:
-         st.write(f'The patient has cancer.')
-         final_img = detection_inference(cv_img)
-         final_pil_image = Image.fromarray(final_img)
-         gradcam(final_pil_image, 'oral cancer-vggg19.h5')
-     else:
-         st.write(f'The patient does not have cancer.')
-         gradcam(img, 'oral cancer-vggg19.h5')
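
A note on the detection loop in the deleted file above: `cv2.dnn.NMSBoxes` returned nested indices (e.g. `[[0], [2]]`) in the older OpenCV 4.x builds this app appears to target, which is what the `i = i[0]` unwrapping assumes; newer OpenCV builds (4.5.4 and later) return a flat array, so that line breaks there. A minimal, version-tolerant sketch (the index values are made up):

```python
import numpy as np

# Both historical NMSBoxes return shapes normalize to the same flat array,
# so `for i in np.array(indices).flatten():` works on either OpenCV version.
for indices in ([[0], [2]], [0, 2], np.array([0, 2])):
    print(list(np.array(indices).flatten()))  # -> [0, 2] in every case
```
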
spaces/Bazedgul/YoutubeVideo-Transcript-Summarization/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: YoutubeVideo Transcript Summarization
- emoji: 🌖
- colorFrom: gray
- colorTo: gray
- sdk: gradio
- sdk_version: 3.50.2
- app_file: app.py
- pinned: false
- license: cc
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Apk Para El Whatsapp Del Gb.md DELETED
@@ -1,49 +0,0 @@
- <br />
- <h1>APK for GB WhatsApp: Everything You Need to Know</h1>
- <p>WhatsApp is one of the most popular messaging apps in the world, with more than 2 billion users. However, some people are not satisfied with the features and limitations of the official app, and they look for alternative versions that offer more customization and functionality. One of these versions is GB WhatsApp, a modded app that claims to provide more features and options than the original WhatsApp. But what is the APK for GB WhatsApp, and how does it work? Is it safe to use, and what are the best alternatives? In this article, we will answer these questions and more.</p>
- <h2>apk for gb whatsapp</h2><br /><p><b><b>Download Zip</b> &#9881; <a href="https://bltlly.com/2v6KKI">https://bltlly.com/2v6KKI</a></b></p><br /><br />
- <h2>What is the APK for GB WhatsApp?</h2>
- <p>The APK for GB WhatsApp is a file containing the installation package of GB WhatsApp, a modified version of WhatsApp that offers some extra features, such as:</p>
- <ul>
- <li>Using two WhatsApp accounts on the same device</li>
- <li>Hiding your online status, last seen, blue ticks, and typing notifications</li>
- <li>Customizing themes, fonts, icons, and notifications</li>
- <li>Sending larger files and more media at once</li>
- <li>Viewing deleted messages and statuses</li>
- <li>Setting up auto-replies and scheduled messages</li>
- <li>Using more emojis and stickers</li>
- </ul>
- <p>GB WhatsApp is not available on the Google Play Store or the Apple App Store, so users have to download the APK file from third-party websites and install it manually on their devices. However, this also means that GB WhatsApp is not authorized or endorsed by WhatsApp or Facebook, and it can pose risks to users' security and privacy.</p>
- <h2>What are the risks of using GB WhatsApp?</h2>
- <p>Using GB WhatsApp may seem attractive, but it also comes with some drawbacks and dangers. Some of the risks of using GB WhatsApp are:</p>
- <ul>
-
- <li>Vulnerability to malware: GB WhatsApp is downloaded from unofficial sources, which may contain viruses, spyware, or other malicious software. These can damage your device, steal your data, or compromise your accounts.</li>
- <li>Breach of the terms of service: By using GB WhatsApp, you violate WhatsApp's terms of service, which can lead to consequences such as temporary or permanent bans from the original app. This can disrupt your communication with your contacts and cause you to lose your chats and backups.</li>
- <li>Inadequate privacy features: Although GB WhatsApp claims to offer enhanced privacy settings, it is important to remember that these features are not developed or verified by WhatsApp. Therefore, they may not provide the same level of security and privacy as the official app. For example, GB WhatsApp may store unencrypted backups of your chats on third-party servers, which can expose your data to potential breaches.</li>
- </ul>
- <p>To avoid these risks, it is advisable to use only the official version of WhatsApp, or to switch to a more reliable and secure alternative.</p>
- <p></p>
- <h2>What are the best alternatives to GB WhatsApp?</h2>
- <p>If you are looking for a messaging app that offers more features and options than WhatsApp, but without compromising your security and privacy, you may want to consider some of these alternatives:</p>
- <ul>
- <li><strong>Signal:</strong> Signal is a free, open-source app that provides end-to-end encryption for all your messages and calls. It also has features such as self-destructing messages, group chats, stickers, voice notes, and more. Signal is widely regarded as one of the most secure and privacy-friendly messaging apps available.</li>
-
- <li><strong>Threema:</strong> Threema is a paid app that offers end-to-end encryption for all your messages, calls, and files. It also has features such as polls, voice messages, group chats, and more. Threema does not require your phone number or email address to use the app, and it does not collect any metadata or user data.</li>
- </ul>
- <p>These are some of the best alternatives to GB WhatsApp that you can try. However, there are many other messaging apps available, and you should do your own research and comparison before choosing the one that suits your needs and preferences.</p>
- <h2>Conclusion</h2>
- <p>The APK for GB WhatsApp is a modified version of WhatsApp that offers some extra features and customization options. However, it also carries some risks and drawbacks, such as the lack of official support, vulnerability to malware, breach of the terms of service, and inadequate privacy features. It is therefore better to use the official version of WhatsApp, or to switch to a more reliable and secure alternative such as Signal, Telegram, or Threema. These apps offer more features and options than WhatsApp, but without compromising your security and privacy. We hope this article has helped you understand what the APK for GB WhatsApp is, and how to choose the best messaging app for you.</p>
- <h2>Frequently Asked Questions</h2>
- <h3>What is the difference between WhatsApp and GB WhatsApp?</h3>
- <p>WhatsApp is the official version of the messaging app developed by WhatsApp Inc., a subsidiary of Facebook. GB WhatsApp is a modified version of WhatsApp that offers some additional features and options that are not available in the official app.</p>
- <h3>Is GB WhatsApp legal?</h3>
- <p>GB WhatsApp is not illegal, but it is not authorized or endorsed by WhatsApp or Facebook. By using GB WhatsApp, you violate WhatsApp's terms of service, which can lead to consequences such as temporary or permanent bans from the original app.</p>
- <h3>Is GB WhatsApp safe?</h3>
-
- <h3>How can I download GB WhatsApp?</h3>
- <p>You can download GB WhatsApp from third-party websites that provide the APK file for the app. However, we do not recommend doing so, as these websites may be unreliable or unsafe. You should always download apps from official sources such as the Google Play Store or the Apple App Store.</p>
- <h3>How can I update GB WhatsApp?</h3>
- <p>You can update GB WhatsApp by downloading the latest APK file from third-party websites and installing it manually on your device. However, we do not recommend doing so, as these websites may be unreliable or unsafe. You should always update apps from official sources such as the Google Play Store or the Apple App Store.</p>
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Capitn Tsubasa Sueo Equipo Apk Datos.md DELETED
@@ -1,53 +0,0 @@
-
- <h1>Download Captain Tsubasa Dream Team APK + Data for Android</h1>
- <p>If you are a fan of the legendary anime and manga series Captain Tsubasa, you will love this game. Captain Tsubasa Dream Team is a mobile game that lets you build your own dream team of soccer players and compete against other users from around the world. You can also relive the original story and enjoy the familiar characters and scenes. In this article, we will show you how to download and install Captain Tsubasa Dream Team APK + Data for Android, and why you should play this game.</p>
- <h2>What is Captain Tsubasa Dream Team?</h2>
- <p>Captain Tsubasa Dream Team is a soccer simulation game based on the popular anime and manga series by Yoichi Takahashi. The game was developed by KLab Games and released in 2017. It has more than 10 million downloads on the Google Play Store and has received positive reviews from fans and critics alike.</p>
- <h2>download captain tsubasa dream team apk + data</h2><br /><p><b><b>DOWNLOAD</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://bltlly.com/2v6Kky">https://bltlly.com/2v6Kky</a></b></p><br /><br />
- <h3>Features of Captain Tsubasa Dream Team</h3>
- <p>The game has many features that make it fun and exciting to play. Here are some of them:</p>
- <h4>Create your own dream team</h4>
- <p>You can choose from more than 1000 players from the Captain Tsubasa universe, each with their own unique skills and abilities. You can also customize your team's name, emblem, uniform, and formation. You can even transfer skills between players to create your own combinations.</p>
- <h4>Battle against other players online</h4>
- <p>You can test your skills and strategies against other players from around the world in various modes, such as League, Cup, Friendlies, and Rankings. You can also join a club and cooperate with other members to earn rewards and bonuses.</p>
- <h4>Enjoy the original story and characters</h4>
-
- <h3>How to download and install Captain Tsubasa Dream Team APK + Data</h3>
- <p>If you want to play this game on your Android device, you will need to download and install the APK file and the data folder. Here are the steps to do so:</p>
- <h4>Download the APK and data files from a trusted source</h4>
- <p>You can download the APK file and the data folder from a reliable website, such as ReXdl.com. The current version of the game is 8.0.2, which requires Android 4.4 or higher. The file size is 111 MB for the APK file and 117 MB for the data folder.</p>
- <h4>Enable unknown sources on your device</h4>
- <p>Before installing the APK file, you need to enable unknown sources on your device. This will allow you to install apps that do not come from the Google Play Store. To do this, go to Settings > Security > Unknown sources and turn it on.</p>
- <p></p>
- <h4>Install the APK file and extract the data folder</h4>
- <p>After downloading the files, locate them in your device's storage using a file manager app. Tap the APK file to install it. Then, extract the data folder using an app such as ZArchiver or RAR. You should get a folder named "com.klab.games.tsubasa.global"; copy it to Android > obb > com.klab.games.tsubasa.global. Make sure the folder is in the correct path.</p>
- <h4>Launch the game and enjoy</h4>
- <p>You can now launch the game from the app drawer and start playing. You may need to download some additional data when you open the game for the first time, so make sure you have a stable internet connection. You can also link your game account to Facebook or Google Play Games to save your progress and access it on other devices.</p>
- <h2>Why you should play Captain Tsubasa Dream Team</h2>
- <p>Captain Tsubasa Dream Team is not just a game for fans of the anime and manga series. It is also a game for anyone who loves soccer and strategy. Here are some reasons why you should play this game:</p>
- <h3>Experience the nostalgia of the classic anime and manga series</h3>
-
- <h3>Customize your team with various skills and formations</h3>
- <p>You can unleash your creativity and strategy when you build your ideal team. You can choose from different types of players, such as forwards, midfielders, defenders, and goalkeepers. You can also equip them with various skills, such as shots, passes, dribbles, tackles, blocks, and saves. You can also change your team's formation and tactics depending on your opponent and situation.</p>
- <h3>Challenge yourself with different modes and events</h3>
- <p>You will never get bored with this game, as there are always new modes and events to try. You can play Story mode to follow the original plot and unlock new episodes. You can play Online mode to compete with other players and climb the rankings. You can also play Event mode to take part in special campaigns and tournaments and earn exclusive rewards.</p>
- <h2>Conclusion</h2>
- <p>Captain Tsubasa Dream Team is a game that will appeal to both fans and newcomers of the anime and manga series. It is a game that combines soccer simulation and strategy with impressive graphics and animations. It is a game that lets you create your own dream team and battle other players online. It is a game that lets you enjoy the original story and characters of Captain Tsubasa.</p>
- <p>If you want to play this game on your Android device, you can download and install Captain Tsubasa Dream Team APK + Data from a trusted source, such as ReXdl.com. Just follow the steps we have provided in this article and you will be ready to play in no time.</p>
- <p>We hope you found this article helpful and informative. If you have any questions or comments, feel free to leave a comment below. Thanks for reading!</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about Captain Tsubasa Dream Team:</p>
- <ol>
- <li><b>Is Captain Tsubasa Dream Team free to play?</b></li>
-
- <li><b>How can I get more Dreamballs?</b></li>
- <p>You can get more Dreamballs by completing various tasks and achievements in the game, such as clearing stages, logging in daily, taking part in events, and so on. You can also buy them with real money if you want to support the developers.</p>
- <li><b>How can I get more players?</b></li>
- <p>You can get more players by using Gacha tickets or Dreamballs. There are different types of Gacha, such as Normal Gacha (which uses Friend Points), Transfer Gacha (which uses Transfer Tickets or Dreamballs), Event Gacha (which uses Event Tickets or Dreamballs), and so on. Each Gacha has different rates and player pools you can draw from.</p>
- <li><b>How can I upgrade my players?</b></li>
- <p>You can upgrade your players by using various items and materials in the game, such as trainers (which boost your players' stats), skill cards (which teach your players new skills or improve existing ones), evolution items (which let you evolve your players to higher rarities), and so on. You can get these items by playing matches, completing missions, exchanging medals, etc.</p>
- <li><b>How can I link my game account to Facebook or Google Play Games?</b></li>
- <p>You can link your game account to Facebook or Google Play Games by going to Menu > Others > Data Transfer and choosing the option you prefer. This will let you save your progress and access it on other devices. You can also use a transfer code to transfer your data manually.</p>
- </ol>
- <br />
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py DELETED
@@ -1,296 +0,0 @@
- import functools
- import logging
- import os
- from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
-
- from pip._vendor.packaging.utils import canonicalize_name
- from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
- from pip._vendor.resolvelib import Resolver as RLResolver
- from pip._vendor.resolvelib.structs import DirectedGraph
-
- from pip._internal.cache import WheelCache
- from pip._internal.index.package_finder import PackageFinder
- from pip._internal.operations.prepare import RequirementPreparer
- from pip._internal.req.req_install import InstallRequirement
- from pip._internal.req.req_set import RequirementSet
- from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
- from pip._internal.resolution.resolvelib.provider import PipProvider
- from pip._internal.resolution.resolvelib.reporter import (
-     PipDebuggingReporter,
-     PipReporter,
- )
-
- from .base import Candidate, Requirement
- from .factory import Factory
-
- if TYPE_CHECKING:
-     from pip._vendor.resolvelib.resolvers import Result as RLResult
-
-     Result = RLResult[Requirement, Candidate, str]
-
-
- logger = logging.getLogger(__name__)
-
-
- class Resolver(BaseResolver):
-     _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
-
-     def __init__(
-         self,
-         preparer: RequirementPreparer,
-         finder: PackageFinder,
-         wheel_cache: Optional[WheelCache],
-         make_install_req: InstallRequirementProvider,
-         use_user_site: bool,
-         ignore_dependencies: bool,
-         ignore_installed: bool,
-         ignore_requires_python: bool,
-         force_reinstall: bool,
-         upgrade_strategy: str,
-         py_version_info: Optional[Tuple[int, ...]] = None,
-     ):
-         super().__init__()
-         assert upgrade_strategy in self._allowed_strategies
-
-         self.factory = Factory(
-             finder=finder,
-             preparer=preparer,
-             make_install_req=make_install_req,
-             wheel_cache=wheel_cache,
-             use_user_site=use_user_site,
-             force_reinstall=force_reinstall,
-             ignore_installed=ignore_installed,
-             ignore_requires_python=ignore_requires_python,
-             py_version_info=py_version_info,
-         )
-         self.ignore_dependencies = ignore_dependencies
-         self.upgrade_strategy = upgrade_strategy
-         self._result: Optional[Result] = None
-
-     def resolve(
-         self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
-     ) -> RequirementSet:
-         collected = self.factory.collect_root_requirements(root_reqs)
-         provider = PipProvider(
-             factory=self.factory,
-             constraints=collected.constraints,
-             ignore_dependencies=self.ignore_dependencies,
-             upgrade_strategy=self.upgrade_strategy,
-             user_requested=collected.user_requested,
-         )
-         if "PIP_RESOLVER_DEBUG" in os.environ:
-             reporter: BaseReporter = PipDebuggingReporter()
-         else:
-             reporter = PipReporter()
-         resolver: RLResolver[Requirement, Candidate, str] = RLResolver(
-             provider,
-             reporter,
-         )
-
-         try:
-             limit_how_complex_resolution_can_be = 200000
-             result = self._result = resolver.resolve(
-                 collected.requirements, max_rounds=limit_how_complex_resolution_can_be
-             )
-
-         except ResolutionImpossible as e:
-             error = self.factory.get_installation_error(
-                 cast("ResolutionImpossible[Requirement, Candidate]", e),
-                 collected.constraints,
-             )
-             raise error from e
-
-         req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
-         for candidate in result.mapping.values():
-             ireq = candidate.get_install_requirement()
-             if ireq is None:
-                 continue
-
-             # Check if there is already an installation under the same name,
-             # and set a flag for later stages to uninstall it, if needed.
-             installed_dist = self.factory.get_dist_to_uninstall(candidate)
-             if installed_dist is None:
-                 # There is no existing installation -- nothing to uninstall.
-                 ireq.should_reinstall = False
-             elif self.factory.force_reinstall:
-                 # The --force-reinstall flag is set -- reinstall.
-                 ireq.should_reinstall = True
-             elif installed_dist.version != candidate.version:
-                 # The installation is different in version -- reinstall.
-                 ireq.should_reinstall = True
-             elif candidate.is_editable or installed_dist.editable:
-                 # The incoming distribution is editable, or different in
-                 # editable-ness to installation -- reinstall.
-                 ireq.should_reinstall = True
-             elif candidate.source_link and candidate.source_link.is_file:
-                 # The incoming distribution is under file://
-                 if candidate.source_link.is_wheel:
-                     # is a local wheel -- do nothing.
-                     logger.info(
-                         "%s is already installed with the same version as the "
-                         "provided wheel. Use --force-reinstall to force an "
-                         "installation of the wheel.",
-                         ireq.name,
-                     )
-                     continue
-
-                 # is a local sdist or path -- reinstall
-                 ireq.should_reinstall = True
-             else:
-                 continue
-
-             link = candidate.source_link
-             if link and link.is_yanked:
-                 # The reason can contain non-ASCII characters, Unicode
-                 # is required for Python 2.
-                 msg = (
-                     "The candidate selected for download or install is a "
-                     "yanked version: {name!r} candidate (version {version} "
-                     "at {link})\nReason for being yanked: {reason}"
-                 ).format(
-                     name=candidate.name,
-                     version=candidate.version,
-                     link=link,
-                     reason=link.yanked_reason or "<none given>",
-                 )
-                 logger.warning(msg)
-
-             req_set.add_named_requirement(ireq)
-
-         reqs = req_set.all_requirements
-         self.factory.preparer.prepare_linked_requirements_more(reqs)
-         return req_set
-
-     def get_installation_order(
-         self, req_set: RequirementSet
-     ) -> List[InstallRequirement]:
-         """Get order for installation of requirements in RequirementSet.
-
-         The returned list contains a requirement before another that depends on
-         it. This helps ensure that the environment is kept consistent as they
-         get installed one-by-one.
-
-         The current implementation creates a topological ordering of the
-         dependency graph, giving more weight to packages with less
-         or no dependencies, while breaking any cycles in the graph at
-         arbitrary points. We make no guarantees about where the cycle
-         would be broken, other than it *would* be broken.
-         """
-         assert self._result is not None, "must call resolve() first"
-
-         if not req_set.requirements:
-             # Nothing is left to install, so we do not need an order.
-             return []
-
-         graph = self._result.graph
-         weights = get_topological_weights(graph, set(req_set.requirements.keys()))
-
-         sorted_items = sorted(
-             req_set.requirements.items(),
-             key=functools.partial(_req_set_item_sorter, weights=weights),
-             reverse=True,
-         )
-         return [ireq for _, ireq in sorted_items]
-
-
- def get_topological_weights(
-     graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
- ) -> Dict[Optional[str], int]:
-     """Assign weights to each node based on how "deep" they are.
-
-     This implementation may change at any point in the future without prior
-     notice.
-
-     We first simplify the dependency graph by pruning any leaves and giving them
-     the highest weight: a package without any dependencies should be installed
-     first. This is done again and again in the same way, giving ever less weight
-     to the newly found leaves. The loop stops when no leaves are left: all
-     remaining packages have at least one dependency left in the graph.
-
-     Then we continue with the remaining graph, by taking the length for the
-     longest path to any node from root, ignoring any paths that contain a single
-     node twice (i.e. cycles). This is done through a depth-first search through
-     the graph, while keeping track of the path to the node.
-
-     Cycles in the graph result would result in node being revisited while also
-     being on its own path. In this case, take no action. This helps ensure we
-     don't get stuck in a cycle.
-
-     When assigning weight, the longer path (i.e. larger length) is preferred.
-
-     We are only interested in the weights of packages that are in the
-     requirement_keys.
-     """
-     path: Set[Optional[str]] = set()
-     weights: Dict[Optional[str], int] = {}
-
-     def visit(node: Optional[str]) -> None:
-         if node in path:
-             # We hit a cycle, so we'll break it here.
-             return
-
-         # Time to visit the children!
-         path.add(node)
-         for child in graph.iter_children(node):
-             visit(child)
-         path.remove(node)
-
-         if node not in requirement_keys:
-             return
-
-         last_known_parent_count = weights.get(node, 0)
-         weights[node] = max(last_known_parent_count, len(path))
-
-     # Simplify the graph, pruning leaves that have no dependencies.
-     # This is needed for large graphs (say over 200 packages) because the
-     # `visit` function is exponentially slower then, taking minutes.
-     # See https://github.com/pypa/pip/issues/10557
-     # We will loop until we explicitly break the loop.
-     while True:
-         leaves = set()
-         for key in graph:
-             if key is None:
-                 continue
-             for _child in graph.iter_children(key):
-                 # This means we have at least one child
-                 break
-             else:
-                 # No child.
-                 leaves.add(key)
-         if not leaves:
-             # We are done simplifying.
-             break
-         # Calculate the weight for the leaves.
-         weight = len(graph) - 1
-         for leaf in leaves:
-             if leaf not in requirement_keys:
-                 continue
-             weights[leaf] = weight
-         # Remove the leaves from the graph, making it simpler.
-         for leaf in leaves:
-             graph.remove(leaf)
-
-     # Visit the remaining graph.
-     # `None` is guaranteed to be the root node by resolvelib.
-     visit(None)
-
-     # Sanity check: all requirement keys should be in the weights,
-     # and no other keys should be in the weights.
-     difference = set(weights.keys()).difference(requirement_keys)
-     assert not difference, difference
-
-     return weights
-
-
- def _req_set_item_sorter(
-     item: Tuple[str, InstallRequirement],
-     weights: Dict[Optional[str], int],
- ) -> Tuple[int, str]:
-     """Key function used to sort install requirements for installation.
-
-     Based on the "weight" mapping calculated in ``get_installation_order()``.
-     The canonical package name is returned as the second member as a tie-
-     breaker to ensure the result is predictable, which is useful in tests.
-     """
-     name = canonicalize_name(item[0])
-     return weights[name], name
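
For intuition about `get_topological_weights` above, here is a toy sketch of the same two-phase idea on a hypothetical dependency graph built from plain dicts (it mirrors the algorithm's shape, not pip's API): dependency-free leaves are pruned first and get the highest weight, then anything left over, such as a cycle, is weighted by its depth from the root.

```python
# Hypothetical graph: None is the root; lib and util depend on each other.
graph = {None: {"app"}, "app": {"lib"}, "lib": {"util"}, "util": {"lib"}}

weights = {}
remaining = {k: set(v) for k, v in graph.items()}
while True:  # phase 1: repeatedly prune dependency-free leaves
    leaves = {k for k, deps in remaining.items() if k is not None and not deps}
    if not leaves:
        break
    weights.update({leaf: len(graph) - 1 for leaf in leaves})
    remaining = {k: deps - leaves for k, deps in remaining.items() if k not in leaves}

def visit(node, path):  # phase 2: depth-first, breaking cycles via `path`
    if node in path:
        return
    if node is not None:
        weights[node] = max(weights.get(node, 0), len(path))
    for child in remaining.get(node, ()):
        visit(child, path | {node})

visit(None, frozenset())
print(weights)                                         # {'app': 1, 'lib': 2, 'util': 3}
print(sorted(weights, key=weights.get, reverse=True))  # install util, then lib, then app
```
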
spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/simple_tokenizer.py DELETED
@@ -1,132 +0,0 @@
- import gzip
- import html
- import os
- from functools import lru_cache
-
- import ftfy
- import regex as re
-
-
- @lru_cache()
- def default_bpe():
-     return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
-
-
- @lru_cache()
- def bytes_to_unicode():
-     """
-     Returns list of utf-8 byte and a corresponding list of unicode strings.
-     The reversible bpe codes work on unicode strings.
-     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
-     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-     This is a significant percentage of your normal, say, 32K bpe vocab.
-     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
-     And avoids mapping to whitespace/control characters the bpe code barfs on.
-     """
-     bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
-     cs = bs[:]
-     n = 0
-     for b in range(2**8):
-         if b not in bs:
-             bs.append(b)
-             cs.append(2**8+n)
-             n += 1
-     cs = [chr(n) for n in cs]
-     return dict(zip(bs, cs))
-
-
- def get_pairs(word):
-     """Return set of symbol pairs in a word.
-     Word is represented as tuple of symbols (symbols being variable-length strings).
-     """
-     pairs = set()
-     prev_char = word[0]
-     for char in word[1:]:
-         pairs.add((prev_char, char))
-         prev_char = char
-     return pairs
-
-
- def basic_clean(text):
-     text = ftfy.fix_text(text)
-     text = html.unescape(html.unescape(text))
-     return text.strip()
-
-
- def whitespace_clean(text):
-     text = re.sub(r'\s+', ' ', text)
-     text = text.strip()
-     return text
-
-
- class SimpleTokenizer(object):
-     def __init__(self, bpe_path: str = default_bpe()):
-         self.byte_encoder = bytes_to_unicode()
-         self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
-         merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
-         merges = merges[1:49152-256-2+1]
-         merges = [tuple(merge.split()) for merge in merges]
-         vocab = list(bytes_to_unicode().values())
-         vocab = vocab + [v+'</w>' for v in vocab]
-         for merge in merges:
-             vocab.append(''.join(merge))
-         vocab.extend(['<|startoftext|>', '<|endoftext|>'])
-         self.encoder = dict(zip(vocab, range(len(vocab))))
-         self.decoder = {v: k for k, v in self.encoder.items()}
-         self.bpe_ranks = dict(zip(merges, range(len(merges))))
-         self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
-         self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
-
-     def bpe(self, token):
-         if token in self.cache:
-             return self.cache[token]
-         word = tuple(token[:-1]) + ( token[-1] + '</w>',)
-         pairs = get_pairs(word)
-
-         if not pairs:
-             return token+'</w>'
-
-         while True:
-             bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
-             if bigram not in self.bpe_ranks:
-                 break
-             first, second = bigram
-             new_word = []
-             i = 0
-             while i < len(word):
-                 try:
-                     j = word.index(first, i)
-                     new_word.extend(word[i:j])
-                     i = j
-                 except:
-                     new_word.extend(word[i:])
-                     break
-
-                 if word[i] == first and i < len(word)-1 and word[i+1] == second:
-                     new_word.append(first+second)
-                     i += 2
-                 else:
-                     new_word.append(word[i])
-                     i += 1
-             new_word = tuple(new_word)
-             word = new_word
-             if len(word) == 1:
-                 break
-             else:
-                 pairs = get_pairs(word)
-         word = ' '.join(word)
-         self.cache[token] = word
-         return word
-
-     def encode(self, text):
-         bpe_tokens = []
-         text = whitespace_clean(basic_clean(text)).lower()
-         for token in re.findall(self.pat, text):
-             token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
-             bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
-         return bpe_tokens
-
-     def decode(self, tokens):
-         text = ''.join([self.decoder[token] for token in tokens])
-         text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
-         return text
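
A quick round-trip sketch of the tokenizer above, assuming the deleted module is importable as `simple_tokenizer` (a hypothetical path) and that its `bpe_simple_vocab_16e6.txt.gz` merges file sits next to it; the sample sentence is arbitrary:

```python
from simple_tokenizer import SimpleTokenizer  # hypothetical import path

tok = SimpleTokenizer()               # loads the gzipped BPE merge list
ids = tok.encode("a photo of a cat")  # text -> BPE token ids
print(ids)
print(tok.decode(ids).strip())        # ids -> "a photo of a cat"
```
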
spaces/CVH-vn1210/make_hair/minigpt4/common/gradcam.py DELETED
@@ -1,24 +0,0 @@
- import numpy as np
- from matplotlib import pyplot as plt
- from scipy.ndimage import filters
- from skimage import transform as skimage_transform
-
-
- def getAttMap(img, attMap, blur=True, overlap=True):
-     attMap -= attMap.min()
-     if attMap.max() > 0:
-         attMap /= attMap.max()
-     attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant")
-     if blur:
-         attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
-         attMap -= attMap.min()
-         attMap /= attMap.max()
-     cmap = plt.get_cmap("jet")
-     attMapV = cmap(attMap)
-     attMapV = np.delete(attMapV, 3, 2)
-     if overlap:
-         attMap = (
-             1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img
-             + (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV
-         )
-     return attMap
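
A smoke test for `getAttMap`, assuming the deleted module is importable as `gradcam` (a hypothetical path); the shapes are arbitrary, since any HxWx3 image in [0, 1] and any coarse attention grid work:

```python
import numpy as np
from gradcam import getAttMap  # hypothetical import path

img = np.random.rand(224, 224, 3)  # RGB image scaled to [0, 1]
att = np.random.rand(7, 7)         # coarse attention map, e.g. a 7x7 grid
overlay = getAttMap(img, att)      # resized, blurred, jet-colored blend
print(overlay.shape)               # (224, 224, 3)
```
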
spaces/CVPR/LIVE/pybind11/tests/pybind11_tests.h DELETED
@@ -1,65 +0,0 @@
- #pragma once
- #include <pybind11/pybind11.h>
-
- #if defined(_MSC_VER) && _MSC_VER < 1910
- // We get some really long type names here which causes MSVC 2015 to emit warnings
- #  pragma warning(disable: 4503) // warning C4503: decorated name length exceeded, name was truncated
- #endif
-
- namespace py = pybind11;
- using namespace pybind11::literals;
-
- class test_initializer {
-     using Initializer = void (*)(py::module &);
-
- public:
-     test_initializer(Initializer init);
-     test_initializer(const char *submodule_name, Initializer init);
- };
-
- #define TEST_SUBMODULE(name, variable)                   \
-     void test_submodule_##name(py::module &);            \
-     test_initializer name(#name, test_submodule_##name); \
-     void test_submodule_##name(py::module &variable)
-
-
- /// Dummy type which is not exported anywhere -- something to trigger a conversion error
- struct UnregisteredType { };
-
- /// A user-defined type which is exported and can be used by any test
- class UserType {
- public:
-     UserType() = default;
-     UserType(int i) : i(i) { }
-
-     int value() const { return i; }
-     void set(int set) { i = set; }
-
- private:
-     int i = -1;
- };
-
- /// Like UserType, but increments `value` on copy for quick reference vs. copy tests
- class IncType : public UserType {
- public:
-     using UserType::UserType;
-     IncType() = default;
-     IncType(const IncType &other) : IncType(other.value() + 1) { }
-     IncType(IncType &&) = delete;
-     IncType &operator=(const IncType &) = delete;
-     IncType &operator=(IncType &&) = delete;
- };
-
- /// Custom cast-only type that casts to a string "rvalue" or "lvalue" depending on the cast context.
- /// Used to test recursive casters (e.g. std::tuple, stl containers).
- struct RValueCaster {};
- PYBIND11_NAMESPACE_BEGIN(pybind11)
- PYBIND11_NAMESPACE_BEGIN(detail)
- template<> class type_caster<RValueCaster> {
- public:
-     PYBIND11_TYPE_CASTER(RValueCaster, _("RValueCaster"));
-     static handle cast(RValueCaster &&, return_value_policy, handle) { return py::str("rvalue").release(); }
-     static handle cast(const RValueCaster &, return_value_policy, handle) { return py::str("lvalue").release(); }
- };
- PYBIND11_NAMESPACE_END(detail)
- PYBIND11_NAMESPACE_END(pybind11)
spaces/CVPR/WALT/mmdet/models/losses/gaussian_focal_loss.py DELETED
@@ -1,91 +0,0 @@
- import mmcv
- import torch.nn as nn
-
- from ..builder import LOSSES
- from .utils import weighted_loss
-
-
- @mmcv.jit(derivate=True, coderize=True)
- @weighted_loss
- def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
-     """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
-     distribution.
-
-     Args:
-         pred (torch.Tensor): The prediction.
-         gaussian_target (torch.Tensor): The learning target of the prediction
-             in gaussian distribution.
-         alpha (float, optional): A balanced form for Focal Loss.
-             Defaults to 2.0.
-         gamma (float, optional): The gamma for calculating the modulating
-             factor. Defaults to 4.0.
-     """
-     eps = 1e-12
-     pos_weights = gaussian_target.eq(1)
-     neg_weights = (1 - gaussian_target).pow(gamma)
-     pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
-     neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
-     return pos_loss + neg_loss
-
-
- @LOSSES.register_module()
- class GaussianFocalLoss(nn.Module):
-     """GaussianFocalLoss is a variant of focal loss.
-
-     More details can be found in the `paper
-     <https://arxiv.org/abs/1808.01244>`_
-     Code is modified from `kp_utils.py
-     <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501
-     Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
-     not 0/1 binary target.
-
-     Args:
-         alpha (float): Power of prediction.
-         gamma (float): Power of target for negative samples.
-         reduction (str): Options are "none", "mean" and "sum".
-         loss_weight (float): Loss weight of current loss.
-     """
-
-     def __init__(self,
-                  alpha=2.0,
-                  gamma=4.0,
-                  reduction='mean',
-                  loss_weight=1.0):
-         super(GaussianFocalLoss, self).__init__()
-         self.alpha = alpha
-         self.gamma = gamma
-         self.reduction = reduction
-         self.loss_weight = loss_weight
-
-     def forward(self,
-                 pred,
-                 target,
-                 weight=None,
-                 avg_factor=None,
-                 reduction_override=None):
-         """Forward function.
-
-         Args:
-             pred (torch.Tensor): The prediction.
-             target (torch.Tensor): The learning target of the prediction
-                 in gaussian distribution.
-             weight (torch.Tensor, optional): The weight of loss for each
-                 prediction. Defaults to None.
-             avg_factor (int, optional): Average factor that is used to average
-                 the loss. Defaults to None.
-             reduction_override (str, optional): The reduction method used to
-                 override the original reduction method of the loss.
-                 Defaults to None.
-         """
-         assert reduction_override in (None, 'none', 'mean', 'sum')
-         reduction = (
-             reduction_override if reduction_override else self.reduction)
-         loss_reg = self.loss_weight * gaussian_focal_loss(
-             pred,
-             target,
-             weight,
-             alpha=self.alpha,
-             gamma=self.gamma,
-             reduction=reduction,
-             avg_factor=avg_factor)
-         return loss_reg
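
The unweighted core of the loss above can be exercised without mmdet or mmcv installed; a self-contained sketch on dummy tensors, where a single target peak stands in for a real Gaussian heatmap and alpha=2, gamma=4 follow the CornerNet defaults used in the file:

```python
import torch

def gaussian_focal_loss(pred, target, alpha=2.0, gamma=4.0, eps=1e-12):
    pos_weights = target.eq(1)             # positives only at exact peaks
    neg_weights = (1 - target).pow(gamma)  # soft down-weighting elsewhere
    pos = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
    neg = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
    return (pos + neg).mean()

pred = torch.rand(2, 1, 8, 8)              # sigmoid heatmap predictions
target = torch.zeros_like(pred)
target[0, 0, 4, 4] = 1.0                   # one ground-truth center
print(gaussian_focal_loss(pred, target))
```
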
spaces/CVPR/drawings-to-human/frontend/src/lib/store.ts DELETED
@@ -1,17 +0,0 @@
- import { writable } from 'svelte/store';
- import type { Brush, Params, DrawingLayer } from '../types';
- import { randomSeed } from '$lib/utils';
-
- export const drawingLayers = writable<Map<string, DrawingLayer>>(new Map());
- export const resultImage = writable<string>();
- export const currentCanvas = writable<HTMLCanvasElement>();
- export const selectedImage = writable<HTMLImageElement>();
- export const selectedBrush = writable<Brush>();
- export const selectedParams = writable<Params>({
-     texture: '',
-     seed: randomSeed(),
-     steps: 10
- });
-
- export const generateHuman = writable<boolean>(false);
- export const saveResult = writable<boolean>(false);
spaces/CatNika/New_Cat_Proxy/greeting.md DELETED
@@ -1,14 +0,0 @@
- # I don't appear on any sites, don't impersonate me :< 😾 I'm not active on the 4chan site.
-
- ---
-
-
- # If you can verify that you're an Asian woman, I'll let you into my Discord. But please don't send me any body pictures. I'm not asking for that kind of thing. But don't try to deceive me with lies. I'll continue the conversation to confirm whether you're telling the truth. Due to recent fucking crappy banwaves, my keys have been dying quite a bit, so I only plan to share them with people I want to.
-
- # Contact me: [email protected]
-
- <div style="display: flex; align-items: center; justify-content: center; background-color: #f5e6e6; padding: 20px; border-radius: 10px; box-shadow: 0px 0px 10px rgba(0, 0, 0, 0.5);">
- <iframe width="800" height="500" src="https://www.youtube.com/embed/SX_ViT4Ra7k?autoplay=1&rel=0&loop=1&controls=0" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen style="border-radius: 5px;"></iframe>
- </div>
- <p style="color: #333; margin-top: 10px; font-size: 16px; font-weight: bold; text-align: center;">While you're here, immerse yourself in some of my favorite song recommendations.</p>
-
spaces/ClearLove443/Robby-chatbot/pages/3_🎬 Robby-Youtube.py DELETED
@@ -1,71 +0,0 @@
- import os
- import streamlit as st
- import re
- from modules.layout import Layout
- from modules.utils import Utilities
- from modules.sidebar import Sidebar
- from youtube_transcript_api import YouTubeTranscriptApi
- from langchain.chains.summarize import load_summarize_chain
- from langchain.chains import AnalyzeDocumentChain
- from youtube_transcript_api import YouTubeTranscriptApi
- from langchain.llms import OpenAI
- import os
- from langchain.text_splitter import CharacterTextSplitter
-
- st.set_page_config(layout="wide", page_icon="💬", page_title="Robby | Chat-Bot 🤖")
-
- # Instantiate the main components
- layout, sidebar, utils = Layout(), Sidebar(), Utilities()
-
- st.markdown(
-     f"""
-     <h1 style='text-align: center;'> Ask Robby to summarize youtube video ! 😁</h1>
-     """,
-     unsafe_allow_html=True,
- )
-
- user_api_key = utils.load_api_key()
-
- sidebar.about()
-
- if not user_api_key:
-     layout.show_api_key_missing()
-
- else:
-     os.environ["OPENAI_API_KEY"] = user_api_key
-
-     script_docs = []
-
-     def get_youtube_id(url):
-         video_id = None
-         match = re.search(r"(?<=v=)[^&#]+", url)
-         if match:
-             video_id = match.group()
-         else:
-             match = re.search(r"(?<=youtu.be/)[^&#]+", url)
-             if match:
-                 video_id = match.group()
-         return video_id
-
-     video_url = st.text_input(placeholder="Enter Youtube Video URL", label_visibility="hidden", label=" ")
-     if video_url:
-         video_id = get_youtube_id(video_url)
-
-         if video_id != "":
-             t = YouTubeTranscriptApi.get_transcript(video_id, languages=('en', 'fr', 'es', 'zh-cn', 'hi', 'ar', 'bn', 'ru', 'pt', 'sw'))
-             finalString = ""
-             for item in t:
-                 text = item['text']
-                 finalString += text + " "
-
-             text_splitter = CharacterTextSplitter()
-             chunks = text_splitter.split_text(finalString)
-
-             summary_chain = load_summarize_chain(OpenAI(temperature=0),
-                                                  chain_type="map_reduce", verbose=True)
-
-             summarize_document_chain = AnalyzeDocumentChain(combine_docs_chain=summary_chain)
-
-             answer = summarize_document_chain.run(chunks)
-
-             st.subheader(answer)
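
The id extraction in the deleted page above handles the two common YouTube URL shapes; a condensed, self-contained check of the same regexes (the sample URLs are illustrative):

```python
import re

def get_youtube_id(url):
    # Standard watch URLs first, then youtu.be short links.
    match = re.search(r"(?<=v=)[^&#]+", url) or re.search(r"(?<=youtu.be/)[^&#]+", url)
    return match.group() if match else None

print(get_youtube_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))  # dQw4w9WgXcQ
print(get_youtube_id("https://youtu.be/dQw4w9WgXcQ"))                 # dQw4w9WgXcQ
```
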
spaces/CoPoBio/skin_cancer_risk_prediction/simple_vae.py DELETED
@@ -1,124 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torch.autograd import Variable
- import torchvision.models as models
-
- class Encoder(nn.Module):
-     def __init__(self, nc, nef, nz, isize, device):
-         super(Encoder, self).__init__()
-
-         # Device
-         self.device = device
-
-         # Encoder: (nc, isize, isize) -> (nef*8, isize//16, isize//16)
-         self.encoder = nn.Sequential(
-             nn.Conv2d(nc, nef, 4, 2, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(nef),
-
-             nn.Conv2d(nef, nef*2, 4, 2, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(nef*2),
-
-             nn.Conv2d(nef*2, nef*4, 4, 2, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(nef*4),
-
-             nn.Conv2d(nef*4, nef*8, 4, 2, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(nef*8)
-         )
-
-         # Map the encoded feature map to the latent vector of mean, (log)variance
-         out_size = isize // 16
-         self.mean = nn.Linear(nef*8*out_size*out_size, nz)
-         self.logvar = nn.Linear(nef*8*out_size*out_size, nz)
-
-     def reparametrize(self, mean, logvar):
-         std = logvar.mul(0.5).exp_()
-         multi_norm = torch.FloatTensor(std.size()).normal_().to(self.device)
-         multi_norm = Variable(multi_norm)
-         return multi_norm.mul(std).add_(mean)
-
-     def forward(self, inputs):
-         # Batch size
-         batch_size = inputs.size(0)
-         # Encoded feature map
-         hidden = self.encoder(inputs)
-         # Reshape
-         hidden = hidden.view(batch_size, -1)
-         # Calculate mean and (log)variance
-         mean, logvar = self.mean(hidden), self.logvar(hidden)
-         # Sample
-         # latent_z = self.reparametrize(mean, logvar)
-         latent_z = mean
-         return latent_z, mean, logvar
-
- class Decoder(nn.Module):
-     def __init__(self, nc, ndf, nz, isize):
-         super(Decoder, self).__init__()
-
-         # Map the latent vector to the feature map space
-         self.ndf = ndf
-         self.out_size = isize // 16
-         self.decoder_dense = nn.Sequential(
-             nn.Linear(nz, ndf*8*self.out_size*self.out_size),
-             nn.ReLU(True)
-         )
-         # Decoder: (ndf*8, isize//16, isize//16) -> (nc, isize, isize)
-         self.decoder_conv = nn.Sequential(
-             nn.UpsamplingNearest2d(scale_factor=2),
-             nn.Conv2d(ndf*8, ndf*4, 3, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(ndf*4, 1.e-3),
-
-             nn.UpsamplingNearest2d(scale_factor=2),
-             nn.Conv2d(ndf*4, ndf*2, 3, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(ndf*2, 1.e-3),
-
-             nn.UpsamplingNearest2d(scale_factor=2),
-             nn.Conv2d(ndf*2, ndf, 3, padding=1),
-             nn.LeakyReLU(0.2, True),
-             nn.BatchNorm2d(ndf, 1.e-3),
-
-             nn.UpsamplingNearest2d(scale_factor=2),
-             nn.Conv2d(ndf, nc, 3, padding=1)
-         )
-
-     def forward(self, input):
-         batch_size = input.size(0)
-         hidden = self.decoder_dense(input).view(
-             batch_size, self.ndf*8, self.out_size, self.out_size)
-         output = self.decoder_conv(hidden)
-         return output
-
- class VAE(nn.Module):
-     def __init__(self, nc=3, ndf=32, nef=32, nz=200, isize=128, device=torch.device("cuda:0"), is_train=True):
-         super(VAE, self).__init__()
-
-         self.nz = nz
-         # Encoder
-         self.encoder = Encoder(nc=nc, nef=nef, nz=nz, isize=isize, device=device)
-         # Decoder
-         self.decoder = Decoder(nc=nc, ndf=ndf, nz=nz, isize=isize)
-         self.pe = nn.Linear(nz, 1)
-         if is_train == False:
-             for param in self.encoder.parameters():
-                 param.requires_grad = False
-             for param in self.decoder.parameters():
-                 param.requires_grad = False
-
-     def forward(self, x):
-         latent_z, mean, logvar = self.encoder(x)
-         rec_x = self.decoder(latent_z)
-         zp = self.pe(latent_z)
-         return rec_x, mean, logvar, latent_z, zp
-
-     def encode(self, x):
-         latent_z, _, _ = self.encoder(x)
-         return latent_z
-
-     def decode(self, z):
-         return self.decoder(z)
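
Worth flagging in the deleted file above: `Encoder.forward` comments out the `reparametrize` call and sets `latent_z = mean`, so the sampling path is dead code and the model behaves as a plain autoencoder. The reparameterization trick itself, z = mu + sigma * eps with eps ~ N(0, I), as a standalone sketch:

```python
import torch

def reparametrize(mean, logvar):
    std = (0.5 * logvar).exp()   # logvar holds log(sigma^2)
    eps = torch.randn_like(std)  # noise drawn outside the autograd graph
    return mean + eps * std      # differentiable w.r.t. mean and logvar

mean = torch.zeros(4, 200, requires_grad=True)    # batch of 4, nz = 200
logvar = torch.zeros(4, 200, requires_grad=True)
z = reparametrize(mean, logvar)
print(z.shape)                   # torch.Size([4, 200])
```
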
spaces/CofAI/chat/client/css/options.css DELETED
@@ -1,10 +0,0 @@
- .options-container {
-     display: flex;
-     flex-wrap: wrap;
- }
-
- @media screen and (max-width: 990px) {
-     .options-container {
-         justify-content: space-between;
-     }
- }
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/transforms/transforms.py DELETED
@@ -1,468 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- import random
- 
- import torch
- import torchvision
- from torchvision.transforms import functional as F
- 
- from maskrcnn_benchmark.structures.bounding_box import BoxList
- from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask
- 
- from maskrcnn_benchmark.structures.ke import textKES
- from maskrcnn_benchmark.structures.mty import MTY
- import numpy as np
- from PIL import Image
- from shapely.geometry import *
- import cv2
- from maskrcnn_benchmark.config import cfg
- 
- 
- class Compose(object):
-     def __init__(self, transforms):
-         self.transforms = transforms
- 
-     def __call__(self, image, target):
-         for t in self.transforms:
-             image, target = t(image, target)
-         return image, target
- 
-     def __repr__(self):
-         format_string = self.__class__.__name__ + "("
-         for t in self.transforms:
-             format_string += "\n"
-             format_string += " {0}".format(t)
-         format_string += "\n)"
-         return format_string
- 
- 
- class Resize(object):
-     def __init__(self, min_size, max_size):
-         if not isinstance(min_size, (list, tuple)):
-             min_size = (min_size,)
-         self.min_size = min_size
-         self.max_size = max_size
- 
-     # modified from torchvision to add support for max size
-     def get_size(self, image_size):
- 
-         # if test ic15
- 
-         #oh = 1200
-         #ow = 2000
-         #return (oh, ow)
- 
-         w, h = image_size
-         size = random.choice(self.min_size)
-         max_size = self.max_size
-         if max_size is not None:
-             min_original_size = float(min((w, h)))
-             max_original_size = float(max((w, h)))
-             if max_original_size / min_original_size * size > max_size:
-                 size = int(round(max_size * min_original_size / max_original_size))
- 
-         if (w <= h and w == size) or (h <= w and h == size):
-             return (h, w)
- 
-         if w < h:
-             ow = size
-             oh = int(size * h / w)
-         else:
-             oh = size
-             ow = int(size * w / h)
- 
-         return (oh, ow)
- 
-     def __call__(self, image, target):
-         size = self.get_size(image.size)
-         image = F.resize(image, size)
-         if isinstance(target, list):
-             target = [t.resize(image.size) for t in target]
-         else:
-             target = target.resize(image.size)
-         return image, target
- 
- 
- class RandomHorizontalFlip(object):
-     def __init__(self, prob=0.5):
-         self.prob = prob
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             image = F.hflip(image)
-             if isinstance(target, list):
-                 target = [t.transpose(0) for t in target]
-             else:
-                 target = target.transpose(0)
-         return image, target
- 
- 
- class ToTensor(object):
-     def __call__(self, image, target):
-         return F.to_tensor(image), target
- 
- 
- class Normalize(object):
-     def __init__(self, mean, std, to_bgr255=True):
-         self.mean = mean
-         self.std = std
-         self.to_bgr255 = to_bgr255
- 
-     def __call__(self, image, target):
-         if self.to_bgr255:
-             image = image[[2, 1, 0]] * 255
-         image = F.normalize(image, mean=self.mean, std=self.std)
-         return image, target
- 
- 
- class RandomCrop(object):
-     """Random crop with repeatedly expanding the range to include box borders."""
-     def __init__(self, prob, init_crop_size=(0.5, 1.0)):
- 
-         if (not isinstance(init_crop_size, list)) and (not isinstance(init_crop_size, tuple)):
-             raise ValueError('Parameter init_crop_size should be a list or tuple!')
-         elif len(init_crop_size) != 2:
-             raise ValueError('Length of init_crop_size should be 2!')
-         elif not (init_crop_size[0] <= 1 and init_crop_size[0] >= 0 and init_crop_size[1] <= 1 and init_crop_size[1] >= 0):
-             raise ValueError('Elements of init_crop_size should be within [0, 1]!')
-         self.prob = prob
-         self.init_crop_size = init_crop_size
- 
-     def __call__(self, image, target):
-         if random.random() >= self.prob:
-             return image, target
- 
-         if isinstance(target, list):
-             target0 = target[0]
-         else:
-             target0 = target
-         while True:
-             # Initial Crop Region
-             crop_region = self.initial_crop_region(image)
- 
-             # Adjust Crop Region
-             crop_region, keep_target = self.adjust_crop_region(crop_region, target0)
-             if crop_region is None and keep_target is None:
-                 continue
- 
-             if isinstance(target, list):
-                 # check empty char
-                 new_t1 = target[1].crop(crop_region)
-                 if len(new_t1) < 1: return image, target
- 
-             image = image.crop(crop_region.numpy())
-             if isinstance(target, list):
-                 target0 = keep_target.crop(crop_region)
-                 others = [t.crop(crop_region, remove_empty=True) for t in target[1:]]
-                 target = [target0] + others
-             else:
-                 target = keep_target.crop(crop_region)
- 
-             return image, target
- 
-     def initial_crop_region(self, image):
-         width, height = image.size
-         ratio_w, ratio_h = torch.empty(2).uniform_(self.init_crop_size[0], self.init_crop_size[1])
-         crop_width, crop_height = int(width*ratio_w), int(height*ratio_h)
-         crop_xmin = torch.randint(width-crop_width, (1,))
-         crop_ymin = torch.randint(height-crop_height, (1,))
-         crop_xmax = crop_xmin + crop_width
-         crop_ymax = crop_ymin + crop_height
-         crop_region = torch.Tensor([crop_xmin, crop_ymin, crop_xmax, crop_ymax])
-         return crop_region
- 
-     def intersect_area(self, bbox, bboxes):
-         inter_xmin = torch.max(bbox[0], bboxes[:, 0])
-         inter_ymin = torch.max(bbox[1], bboxes[:, 1])
-         inter_xmax = torch.min(bbox[2], bboxes[:, 2])
-         inter_ymax = torch.min(bbox[3], bboxes[:, 3])
-         inter_width = torch.max(torch.Tensor([0]), inter_xmax-inter_xmin)
-         inter_height = torch.max(torch.Tensor([0]), inter_ymax-inter_ymin)
-         return inter_width*inter_height
- 
-     def adjust_crop_region(self, crop_region, target):
-         keep_indies_ = torch.zeros((len(target)), dtype=torch.uint8)
-         while True:
-             inter_area = self.intersect_area(crop_region, target.bbox)
-             keep_indies = (inter_area > 0)
-             if torch.sum(keep_indies) == 0:
-                 return None, None
-             keep_target = target[keep_indies]
-             if keep_indies.equal(keep_indies_):
-                 return crop_region, keep_target
-             keep_bbox = keep_target.bbox
-             crop_xmin = torch.min(crop_region[0], torch.min(keep_bbox[:, 0]))
-             crop_ymin = torch.min(crop_region[1], torch.min(keep_bbox[:, 1]))
-             crop_xmax = torch.max(crop_region[2], torch.max(keep_bbox[:, 2]))
-             crop_ymax = torch.max(crop_region[3], torch.max(keep_bbox[:, 3]))
-             crop_region = torch.Tensor([crop_xmin, crop_ymin, crop_xmax, crop_ymax])
-             keep_indies_ = keep_indies
- 
- class RandomBrightness(object):
-     def __init__(self, prob=0.5):
-         self.prob = prob
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             brightness_factor = random.uniform(0.5, 2)
-             image = F.adjust_brightness(image, brightness_factor)
-         return image, target
- 
- class RandomContrast(object):
-     def __init__(self, prob=0.5):
-         self.prob = prob
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             contrast_factor = random.uniform(0.5, 2)
-             image = F.adjust_contrast(image, contrast_factor)
-         return image, target
- 
- class RandomHue(object):
-     def __init__(self, prob=0.5):
-         self.prob = prob
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             hue_factor = random.uniform(-0.25, 0.25)
-             image = F.adjust_hue(image, hue_factor)
-         return image, target
- 
- class RandomSaturation(object):
-     def __init__(self, prob=0.5):
-         self.prob = prob
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             saturation_factor = random.uniform(0.5, 2)
-             image = F.adjust_saturation(image, saturation_factor)
-         return image, target
- 
- class RandomGamma(object):
-     def __init__(self, prob=0.5):
-         self.prob = prob
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             gamma_factor = random.uniform(0.5, 2)
-             image = F.adjust_gamma(image, gamma_factor)
-         return image, target
- 
- 
- class RandomRotation(object):
-     def __init__(self, prob = 0.3, degree = 5):
-         self.prob = prob
-         self.degree = degree
- 
-     def kes_encode(self, kes):
-         kes_encode = []
-         for i in kes:
-             mnx = i[0]
-             mny = i[1]
-             assert(len(i)%3 == 0)
-             npts = int(len(i)/3-2)
-             for index in range(npts):
-                 i[3+index*3] = (i[3+index*3]+mnx)/2
-                 i[4+index*3] = (i[4+index*3]+mny)/2
-             kes_encode.append(i)
-         return kes_encode
- 
-     def kes_gen(self, kes):
-         kes_gen_out = []
-         for i in kes:
-             mnx = i[0]
-             mny = i[1]
-             cx = i[27]
-             cy = i[28]
-             assert(len(i)%3 == 0)
-             ot = [mnx, i[3],i[6],i[9],i[12], cx,\
-                   mny, i[16],i[19],i[22],i[25], cy]
-             kes_gen_out.append(ot)
-         return kes_gen_out
- 
-     def __call__(self, image, target):
-         if random.random() < self.prob:
-             image1 = image
-             target1 = target
-             img = np.array(image)
-             w = image.size[0]
-             h = image.size[1]
-             pri_points = []
-             for i in range(len(target.extra_fields['masks'].instances)):
-                 assert(len(target.extra_fields['masks'].instances[i].polygons)==1), 'one text instance should have only one polygon.'
-                 tensor_box = target.extra_fields['masks'].instances[i].polygons[0].polygons
- 
-                 points_x = np.array([tensor_box[0][0],tensor_box[0][2],tensor_box[0][4],tensor_box[0][6]])
-                 points_y = np.array([tensor_box[0][1],tensor_box[0][3],tensor_box[0][5],tensor_box[0][7]])
-                 smaller_x = np.where(points_x <= 0)
-                 larger_x = np.where(points_x >= w)
-                 smaller_y = np.where(points_y <= 0)
-                 larger_y = np.where(points_y >= h)
-                 points_x[smaller_x] = 1
-                 points_x[larger_x] = w - 1
-                 points_y[smaller_y] = 1
-                 points_y[larger_y] = h - 1
-                 pri_points.append((int(points_x[0]),int(points_y[0])))
-                 pri_points.append((int(points_x[1]),int(points_y[1])))
-                 pri_points.append((int(points_x[2]),int(points_y[2])))
-                 pri_points.append((int(points_x[3]),int(points_y[3])))
- 
-             #get the transform image and points
-             height, width = img.shape[:2]
- 
-             # if ROTATE_DEGREE = (0,30,60,90,210,150,180,210,240,270,300,330,360)
-             #de_ro = random.choice(self.degree)
-             #matrix = cv2.getRotationMatrix2D((width / 2, height / 2) ,de_ro, 1.0)
- 
-             # if ROTATE_DEGREE = 10
-             matrix = cv2.getRotationMatrix2D((width / 2, height / 2), random.uniform(-self.degree[0],self.degree[0]), 1.0)
- 
-             cos = np.abs(matrix[0,0])
-             sin = np.abs(matrix[0,1])
-             new_W = int((height * sin) + (width * cos))
-             new_H = int((height * cos) + (width * sin))
-             matrix[0,2] += (new_W/2) - width/2
-             matrix[1,2] += ((new_H/2)) - height/2
-             img = cv2.warpAffine(img, matrix, (new_W,new_H))
- 
-             change_points = []
-             for i in range(int(len(pri_points))):
-                 x_r,y_r = cv2.transform(np.array([[pri_points[i]]]),matrix).squeeze()
-                 change_points.append([x_r,y_r])
- 
-             image = Image.fromarray(img)
- 
-             keypoints_len = len(change_points)
-             tran_boxes = []
-             n = keypoints_len/4
- 
-             for i in range(int(n)):
-                 tran_boxes.append(change_points[0 + i*4: 4 + i*4])
- 
-             tran_boxes = np.array(tran_boxes).reshape(-1,2)
-             tran_x = []
-             tran_y = []
-             for k in range(len(tran_boxes)):
-                 tran_x.append(int(tran_boxes[k][0]))
-                 tran_y.append(int(tran_boxes[k][1]))
-             max_x = max(tran_x)
-             min_x = min(tran_x)
-             max_y = max(tran_y)
-             min_y = min(tran_y)
-             ctr_x = new_W / 2
-             ctr_y = new_H / 2
-             origin_xmin = ctr_x - width / 2
-             origin_xmax = ctr_x + width / 2
-             origin_ymin = ctr_y - height / 2
-             origin_ymax = ctr_y + height / 2
-             cut_xmax = origin_xmax
-             cut_xmin = origin_xmin
-             cut_ymax = origin_ymax
-             cut_ymin = origin_ymin
-             if max_x >= origin_xmax:
-                 cut_xmax = max_x
-             if min_x <= origin_xmin:
-                 cut_xmin = min_x
-             if max_y >= origin_ymax:
-                 cut_ymax = max_y
-             if min_y <= origin_ymin:
-                 cut_ymin = min_y
-             for i in range(len(tran_boxes)):
-                 tran_x[i] = tran_x[i] - cut_xmin
-                 tran_y[i] = tran_y[i] - cut_ymin
-             image = image.crop((cut_xmin,cut_ymin,cut_xmax,cut_ymax))
-             tran_x = np.array(tran_x)
-             tran_y = np.array(tran_y)
- 
-             boxes = []
-             masks = []
-             mty = []
-             kes = []
-             #GET FORMAT OF BOXES,MASKS
-             for idx in range(int(tran_x.size/4)):
-                 x_points = [tran_x[4 * idx], tran_x[4*idx+1],tran_x[4*idx+2],tran_x[4*idx+3]]
-                 y_points = [tran_y[4 * idx], tran_y[4*idx+1],tran_y[4*idx+2],tran_y[4*idx+3]]
- 
-                 l1 = LineString([(x_points[0], y_points[0]), (x_points[2], y_points[2])])
-                 l2 = LineString([(x_points[1], y_points[1]), (x_points[3], y_points[3])])
-                 p_l1l2 = l1.intersection(l2)
-                 poly1 = Polygon([(x_points[0], y_points[0]), (x_points[1], y_points[1]),
-                                  (x_points[2], y_points[2]), (x_points[3], y_points[3])])
-                 if not poly1.is_valid:
-                     continue
-                 if not p_l1l2.within(poly1):
-                     continue
-                 if poly1.area <= 10:
-                     continue
-                 x_min = min(x_points)
-                 x_max = max(x_points)
-                 y_min = min(y_points)
-                 y_max = max(y_points)
-                 width = max(0, x_max - x_min + 1)
-                 height = max(0, y_max - y_min + 1)
-                 if width == 0 or height == 0:
-                     continue
-                 boxes.append([x_min,y_min,width,height])
- 
-                 #get mask format
-                 one_point = [[tran_x[4*idx],tran_y[4*idx],tran_x[4*idx+1],tran_y[4*idx+1],tran_x[4*idx+2],tran_y[4*idx+2],tran_x[4*idx+3],tran_y[4*idx+3]]]
-                 masks.append(one_point)
- 
-                 #get matchtype format
-                 mean_x = np.mean(x_points)
-                 mean_y = np.mean(y_points)
-                 xt_sort = np.sort(x_points)
-                 yt_sort = np.sort(y_points)
-                 xt_argsort = list(np.argsort(x_points))
-                 yt_argsort = list(np.argsort(y_points))
-                 ldx = []
-                 for ildx in range(4):
-                     ldx.append(yt_argsort.index(xt_argsort[ildx]))
-                 all_types = [[1,2,3,4],[1,2,4,3],[1,3,2,4],[1,3,4,2],[1,4,2,3],[1,4,3,2],\
-                              [2,1,3,4],[2,1,4,3],[2,3,1,4],[2,3,4,1],[2,4,1,3],[2,4,3,1],\
-                              [3,1,2,4],[3,1,4,2],[3,2,1,4],[3,2,4,1],[3,4,1,2],[3,4,2,1],\
-                              [4,1,2,3],[4,1,3,2],[4,2,1,3],[4,2,3,1],[4,3,1,2],[4,3,2,1]]
-                 all_types = [[all_types[iat][0]-1,all_types[iat][1]-1,all_types[iat][2]-1,all_types[iat][3]-1] for iat in range(24)]
-                 match_type = all_types.index(ldx)
-                 mty.append(match_type)
- 
-                 half_x = (xt_sort + mean_x) / 2
-                 half_y = (yt_sort + mean_y) / 2
- 
-                 keypoints = []
-                 keypoints.append(mean_x)
-                 keypoints.append(mean_y)
-                 keypoints.append(2)
-                 for i in range(4):
-                     keypoints.append(half_x[i])
-                     keypoints.append(mean_y)
-                     keypoints.append(2)
-                 for i in range(4):
-                     keypoints.append(mean_x)
-                     keypoints.append(half_y[i])
-                     keypoints.append(2)
-                 try:
-                     keypoints.append(int(p_l1l2.x))
-                     keypoints.append(int(p_l1l2.y))
-                     keypoints.append(2)
-                 except Exception as e:
-                     continue
-                 kes.append(keypoints)
-             #IF ENCOUNTER THAT NO BOX IN A TRANSFORMED IMAGE, RETURN PRIMARY IMAGE AND TARGET
-             if kes == []:
-                 image = image1
-                 target = target1
-                 return image,target
-             classes = []
-             for i in range(len(boxes)):
-                 classes.append(1)
-             classes = torch.tensor(classes)
-             #GET NEW TARGET
-             boxes = torch.as_tensor(boxes).reshape(-1, 4)
-             target = BoxList(boxes, image.size, mode="xywh").convert("xyxy")
- 
-             target.add_field("labels",classes)
- 
-             masks = SegmentationMask(masks, image.size)
-             target.add_field("masks", masks)
- 
-         return image,target
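
For context, a sketch of how the transform classes above are typically chained for training (each transform maps an (image, target) pair to an (image, target) pair); the numeric values are illustrative placeholders, not this repo's config:

# Assumes the deleted module is importable at its original path.
from maskrcnn_benchmark.data.transforms import transforms as T

train_transform = T.Compose([
    T.Resize(min_size=(800,), max_size=1333),       # illustrative sizes
    T.RandomHorizontalFlip(prob=0.5),
    T.RandomBrightness(prob=0.5),
    T.ToTensor(),
    T.Normalize(mean=[102.9801, 115.9465, 122.7717], # illustrative BGR means
                std=[1.0, 1.0, 1.0], to_bgr255=True),
])

# image, target = train_transform(pil_image, boxlist_target)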
 
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/processors/blip_processors.py DELETED
@@ -1,142 +0,0 @@
- """
-  Copyright (c) 2022, salesforce.com, inc.
-  All rights reserved.
-  SPDX-License-Identifier: BSD-3-Clause
-  For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
- """
- 
- import re
- 
- from video_llama.common.registry import registry
- from video_llama.processors.base_processor import BaseProcessor
- from video_llama.processors.randaugment import RandomAugment
- from omegaconf import OmegaConf
- from torchvision import transforms
- from torchvision.transforms.functional import InterpolationMode
- 
- 
- class BlipImageBaseProcessor(BaseProcessor):
-     def __init__(self, mean=None, std=None):
-         if mean is None:
-             mean = (0.48145466, 0.4578275, 0.40821073)
-         if std is None:
-             std = (0.26862954, 0.26130258, 0.27577711)
- 
-         self.normalize = transforms.Normalize(mean, std)
- 
- 
- @registry.register_processor("blip_caption")
- class BlipCaptionProcessor(BaseProcessor):
-     def __init__(self, prompt="", max_words=50):
-         self.prompt = prompt
-         self.max_words = max_words
- 
-     def __call__(self, caption):
-         caption = self.prompt + self.pre_caption(caption)
- 
-         return caption
- 
-     @classmethod
-     def from_config(cls, cfg=None):
-         if cfg is None:
-             cfg = OmegaConf.create()
- 
-         prompt = cfg.get("prompt", "")
-         max_words = cfg.get("max_words", 50)
- 
-         return cls(prompt=prompt, max_words=max_words)
- 
-     def pre_caption(self, caption):
-         caption = re.sub(
-             r"([.!\"()*#:;~])",
-             " ",
-             caption.lower(),
-         )
-         caption = re.sub(
-             r"\s{2,}",
-             " ",
-             caption,
-         )
-         caption = caption.rstrip("\n")
-         caption = caption.strip(" ")
- 
-         # truncate caption
-         caption_words = caption.split(" ")
-         if len(caption_words) > self.max_words:
-             caption = " ".join(caption_words[: self.max_words])
- 
-         return caption
- 
- 
- @registry.register_processor("blip2_image_train")
- class Blip2ImageTrainProcessor(BlipImageBaseProcessor):
-     def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0):
-         super().__init__(mean=mean, std=std)
- 
-         self.transform = transforms.Compose(
-             [
-                 transforms.RandomResizedCrop(
-                     image_size,
-                     scale=(min_scale, max_scale),
-                     interpolation=InterpolationMode.BICUBIC,
-                 ),
-                 transforms.ToTensor(),
-                 self.normalize,
-             ]
-         )
- 
-     def __call__(self, item):
-         return self.transform(item)
- 
-     @classmethod
-     def from_config(cls, cfg=None):
-         if cfg is None:
-             cfg = OmegaConf.create()
- 
-         image_size = cfg.get("image_size", 224)
- 
-         mean = cfg.get("mean", None)
-         std = cfg.get("std", None)
- 
-         min_scale = cfg.get("min_scale", 0.5)
-         max_scale = cfg.get("max_scale", 1.0)
- 
-         return cls(
-             image_size=image_size,
-             mean=mean,
-             std=std,
-             min_scale=min_scale,
-             max_scale=max_scale,
-         )
- 
- 
- @registry.register_processor("blip2_image_eval")
- class Blip2ImageEvalProcessor(BlipImageBaseProcessor):
-     def __init__(self, image_size=224, mean=None, std=None):
-         super().__init__(mean=mean, std=std)
- 
-         self.transform = transforms.Compose(
-             [
-                 transforms.Resize(
-                     (image_size, image_size), interpolation=InterpolationMode.BICUBIC
-                 ),
-                 transforms.ToTensor(),
-                 self.normalize,
-             ]
-         )
- 
-     def __call__(self, item):
-         return self.transform(item)
- 
-     @classmethod
-     def from_config(cls, cfg=None):
-         if cfg is None:
-             cfg = OmegaConf.create()
- 
-         image_size = cfg.get("image_size", 224)
- 
-         mean = cfg.get("mean", None)
-         std = cfg.get("std", None)
- 
-         return cls(image_size=image_size, mean=mean, std=std)
- 
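
For context, a sketch of how these registered processors are usually looked up and applied; the config values and the input file name are illustrative assumptions:

# Assumes the video_llama package the deleted file belonged to is importable.
from PIL import Image
from omegaconf import OmegaConf
from video_llama.common.registry import registry

image_proc_cls = registry.get_processor_class("blip2_image_eval")
image_proc = image_proc_cls.from_config(OmegaConf.create({"image_size": 224}))

caption_proc_cls = registry.get_processor_class("blip_caption")
caption_proc = caption_proc_cls.from_config()

img = Image.open("example.jpg").convert("RGB")            # hypothetical input
pixel_tensor = image_proc(img)                            # (3, 224, 224) normalized tensor
caption = caption_proc("A DOG runs;; across the FIELD!")  # lowercased, cleaned, truncated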
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ufoLib/utils.py DELETED
@@ -1,75 +0,0 @@
- """The module contains miscellaneous helpers.
- It's not considered part of the public ufoLib API.
- """
- import warnings
- import functools
- 
- 
- numberTypes = (int, float)
- 
- 
- def deprecated(msg=""):
-     """Decorator factory to mark functions as deprecated with given message.
- 
-     >>> @deprecated("Enough!")
-     ... def some_function():
-     ...     "I just print 'hello world'."
-     ...     print("hello world")
-     >>> some_function()
-     hello world
-     >>> some_function.__doc__ == "I just print 'hello world'."
-     True
-     """
- 
-     def deprecated_decorator(func):
-         @functools.wraps(func)
-         def wrapper(*args, **kwargs):
-             warnings.warn(
-                 f"{func.__name__} function is deprecated. {msg}",
-                 category=DeprecationWarning,
-                 stacklevel=2,
-             )
-             return func(*args, **kwargs)
- 
-         return wrapper
- 
-     return deprecated_decorator
- 
- 
- # To be mixed with enum.Enum in UFOFormatVersion and GLIFFormatVersion
- class _VersionTupleEnumMixin:
-     @property
-     def major(self):
-         return self.value[0]
- 
-     @property
-     def minor(self):
-         return self.value[1]
- 
-     @classmethod
-     def _missing_(cls, value):
-         # allow to initialize a version enum from a single (major) integer
-         if isinstance(value, int):
-             return cls((value, 0))
-         # or from None to obtain the current default version
-         if value is None:
-             return cls.default()
-         return super()._missing_(value)
- 
-     def __str__(self):
-         return f"{self.major}.{self.minor}"
- 
-     @classmethod
-     def default(cls):
-         # get the latest defined version (i.e. the max of all versions)
-         return max(cls.__members__.values())
- 
-     @classmethod
-     def supported_versions(cls):
-         return frozenset(cls.__members__.values())
- 
- 
- if __name__ == "__main__":
-     import doctest
- 
-     doctest.testmod()
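
For reference, a minimal sketch of how the two helpers above are used together; ToyFormatVersion is an invented example (ufoLib's real UFOFormatVersion follows the same tuple + mixin pattern), and new_api() is a hypothetical name:

import enum

from fontTools.ufoLib.utils import _VersionTupleEnumMixin, deprecated


@deprecated("Use new_api() instead.")  # hypothetical replacement name
def old_api():
    return 42


# The tuple base makes members orderable, which default() relies on.
class ToyFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):
    FORMAT_1_0 = (1, 0)
    FORMAT_2_0 = (2, 0)


old_api()  # emits a DeprecationWarning but still returns 42
assert ToyFormatVersion(2) is ToyFormatVersion.FORMAT_2_0  # _missing_ maps 2 -> (2, 0)
assert str(ToyFormatVersion.default()) == "2.0"            # latest defined version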
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/core.py DELETED
@@ -1,682 +0,0 @@
- from __future__ import absolute_import, division, print_function
- 
- import io
- import logging
- import os
- import re
- from glob import has_magic
- 
- # for backwards compat, we export cache things from here too
- from .caching import (  # noqa: F401
-     BaseCache,
-     BlockCache,
-     BytesCache,
-     MMapCache,
-     ReadAheadCache,
-     caches,
- )
- from .compression import compr
- from .registry import filesystem, get_filesystem_class
- from .utils import (
-     _unstrip_protocol,
-     build_name_function,
-     infer_compression,
-     stringify_path,
- )
- 
- logger = logging.getLogger("fsspec")
- 
- 
- class OpenFile:
-     """
-     File-like object to be used in a context
- 
-     Can layer (buffered) text-mode and compression over any file-system, which
-     are typically binary-only.
- 
-     These instances are safe to serialize, as the low-level file object
-     is not created until invoked using ``with``.
- 
-     Parameters
-     ----------
-     fs: FileSystem
-         The file system to use for opening the file. Should be a subclass or duck-type
-         with ``fsspec.spec.AbstractFileSystem``
-     path: str
-         Location to open
-     mode: str like 'rb', optional
-         Mode of the opened file
-     compression: str or None, optional
-         Compression to apply
-     encoding: str or None, optional
-         The encoding to use if opened in text mode.
-     errors: str or None, optional
-         How to handle encoding errors if opened in text mode.
-     newline: None or str
-         Passed to TextIOWrapper in text mode, how to handle line endings.
-     autoopen: bool
-         If True, calls open() immediately. Mostly used by pickle
-     pos: int
-         If given and autoopen is True, seek to this location immediately
-     """
- 
-     def __init__(
-         self,
-         fs,
-         path,
-         mode="rb",
-         compression=None,
-         encoding=None,
-         errors=None,
-         newline=None,
-     ):
-         self.fs = fs
-         self.path = path
-         self.mode = mode
-         self.compression = get_compression(path, compression)
-         self.encoding = encoding
-         self.errors = errors
-         self.newline = newline
-         self.fobjects = []
- 
-     def __reduce__(self):
-         return (
-             OpenFile,
-             (
-                 self.fs,
-                 self.path,
-                 self.mode,
-                 self.compression,
-                 self.encoding,
-                 self.errors,
-                 self.newline,
-             ),
-         )
- 
-     def __repr__(self):
-         return "<OpenFile '{}'>".format(self.path)
- 
-     def __enter__(self):
-         mode = self.mode.replace("t", "").replace("b", "") + "b"
- 
-         f = self.fs.open(self.path, mode=mode)
- 
-         self.fobjects = [f]
- 
-         if self.compression is not None:
-             compress = compr[self.compression]
-             f = compress(f, mode=mode[0])
-             self.fobjects.append(f)
- 
-         if "b" not in self.mode:
-             # assume, for example, that 'r' is equivalent to 'rt' as in builtin
-             f = PickleableTextIOWrapper(
-                 f, encoding=self.encoding, errors=self.errors, newline=self.newline
-             )
-             self.fobjects.append(f)
- 
-         return self.fobjects[-1]
- 
-     def __exit__(self, *args):
-         self.close()
- 
-     @property
-     def full_name(self):
-         return _unstrip_protocol(self.path, self.fs)
- 
-     def open(self):
-         """Materialise this as a real open file without context
- 
-         The OpenFile object should be explicitly closed to avoid enclosed file
-         instances persisting. You must, therefore, keep a reference to the OpenFile
-         during the life of the file-like it generates.
-         """
-         return self.__enter__()
- 
-     def close(self):
-         """Close all encapsulated file objects"""
-         for f in reversed(self.fobjects):
-             if "r" not in self.mode and not f.closed:
-                 f.flush()
-             f.close()
-         self.fobjects.clear()
- 
- 
- class OpenFiles(list):
-     """List of OpenFile instances
- 
-     Can be used in a single context, which opens and closes all of the
-     contained files. Normal list access to get the elements works as
-     normal.
- 
-     A special case is made for caching filesystems - the files will
-     be down/uploaded together at the start or end of the context, and
-     this may happen concurrently, if the target filesystem supports it.
-     """
- 
-     def __init__(self, *args, mode="rb", fs=None):
-         self.mode = mode
-         self.fs = fs
-         self.files = []
-         super().__init__(*args)
- 
-     def __enter__(self):
-         if self.fs is None:
-             raise ValueError("Context has already been used")
- 
-         fs = self.fs
-         while True:
-             if hasattr(fs, "open_many"):
-                 # check for concurrent cache download; or set up for upload
-                 self.files = fs.open_many(self)
-                 return self.files
-             if hasattr(fs, "fs") and fs.fs is not None:
-                 fs = fs.fs
-             else:
-                 break
-         return [s.__enter__() for s in self]
- 
-     def __exit__(self, *args):
-         fs = self.fs
-         [s.__exit__(*args) for s in self]
-         if "r" not in self.mode:
-             while True:
-                 if hasattr(fs, "open_many"):
-                     # check for concurrent cache upload
-                     fs.commit_many(self.files)
-                     return
-                 if hasattr(fs, "fs") and fs.fs is not None:
-                     fs = fs.fs
-                 else:
-                     break
- 
-     def __getitem__(self, item):
-         out = super().__getitem__(item)
-         if isinstance(item, slice):
-             return OpenFiles(out, mode=self.mode, fs=self.fs)
-         return out
- 
-     def __repr__(self):
-         return "<List of %s OpenFile instances>" % len(self)
- 
- 
- def open_files(
-     urlpath,
-     mode="rb",
-     compression=None,
-     encoding="utf8",
-     errors=None,
-     name_function=None,
-     num=1,
-     protocol=None,
-     newline=None,
-     auto_mkdir=True,
-     expand=True,
-     **kwargs,
- ):
-     """Given a path or paths, return a list of ``OpenFile`` objects.
- 
-     For writing, a str path must contain the "*" character, which will be filled
-     in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
- 
-     For either reading or writing, can instead provide explicit list of paths.
- 
-     Parameters
-     ----------
-     urlpath: string or list
-         Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
-         to read from alternative filesystems. To read from multiple files you
-         can pass a globstring or a list of paths, with the caveat that they
-         must all have the same protocol.
-     mode: 'rb', 'wt', etc.
-     compression: string or None
-         If given, open file using compression codec. Can either be a compression
-         name (a key in ``fsspec.compression.compr``) or "infer" to guess the
-         compression from the filename suffix.
-     encoding: str
-         For text mode only
-     errors: None or str
-         Passed to TextIOWrapper in text mode
-     name_function: function or None
-         if opening a set of files for writing, those files do not yet exist,
-         so we need to generate their names by formatting the urlpath for
-         each sequence number
-     num: int [1]
-         if writing mode, number of files we expect to create (passed to
-         name_function)
-     protocol: str or None
-         If given, overrides the protocol found in the URL.
-     newline: bytes or None
-         Used for line terminator in text mode. If None, uses system default;
-         if blank, uses no translation.
-     auto_mkdir: bool (True)
-         If in write mode, this will ensure the target directory exists before
-         writing, by calling ``fs.mkdirs(exist_ok=True)``.
-     expand: bool
-     **kwargs: dict
-         Extra options that make sense to a particular storage connection, e.g.
-         host, port, username, password, etc.
- 
-     Examples
-     --------
-     >>> files = open_files('2015-*-*.csv')  # doctest: +SKIP
-     >>> files = open_files(
-     ...     's3://bucket/2015-*-*.csv.gz', compression='gzip'
-     ... )  # doctest: +SKIP
- 
-     Returns
-     -------
-     An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
-     be used as a single context
- 
-     Notes
-     -----
-     For a full list of the available protocols and the implementations that
-     they map across to see the latest online documentation:
- 
-     - For implementations built into ``fsspec`` see
-       https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
-     - For implementations in separate packages see
-       https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
-     """
-     fs, fs_token, paths = get_fs_token_paths(
-         urlpath,
-         mode,
-         num=num,
-         name_function=name_function,
-         storage_options=kwargs,
-         protocol=protocol,
-         expand=expand,
-     )
-     if "r" not in mode and auto_mkdir:
-         parents = {fs._parent(path) for path in paths}
-         [fs.makedirs(parent, exist_ok=True) for parent in parents]
-     return OpenFiles(
-         [
-             OpenFile(
-                 fs,
-                 path,
-                 mode=mode,
-                 compression=compression,
-                 encoding=encoding,
-                 errors=errors,
-                 newline=newline,
-             )
-             for path in paths
-         ],
-         mode=mode,
-         fs=fs,
-     )
- 
- 
- def _un_chain(path, kwargs):
-     x = re.compile(".*[^a-z]+.*")  # test for non protocol-like single word
-     bits = (
-         [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
-         if "::" in path
-         else [path]
-     )
-     # [[url, protocol, kwargs], ...]
-     out = []
-     previous_bit = None
-     kwargs = kwargs.copy()
-     for bit in reversed(bits):
-         protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
-         cls = get_filesystem_class(protocol)
-         extra_kwargs = cls._get_kwargs_from_urls(bit)
-         kws = kwargs.pop(protocol, {})
-         if bit is bits[0]:
-             kws.update(kwargs)
-         kw = dict(**extra_kwargs, **kws)
-         bit = cls._strip_protocol(bit)
-         if (
-             protocol in {"blockcache", "filecache", "simplecache"}
-             and "target_protocol" not in kw
-         ):
-             bit = previous_bit
-         out.append((bit, protocol, kw))
-         previous_bit = bit
-     out = list(reversed(out))
-     return out
- 
- 
- def url_to_fs(url, **kwargs):
-     """
-     Turn fully-qualified and potentially chained URL into filesystem instance
- 
-     Parameters
-     ----------
-     url : str
-         The fsspec-compatible URL
-     **kwargs: dict
-         Extra options that make sense to a particular storage connection, e.g.
-         host, port, username, password, etc.
- 
-     Returns
-     -------
-     filesystem : FileSystem
-         The new filesystem discovered from ``url`` and created with
-         ``**kwargs``.
-     urlpath : str
-         The file-systems-specific URL for ``url``.
-     """
-     chain = _un_chain(url, kwargs)
-     inkwargs = {}
-     # Reverse iterate the chain, creating a nested target_* structure
-     for i, ch in enumerate(reversed(chain)):
-         urls, protocol, kw = ch
-         if i == len(chain) - 1:
-             inkwargs = dict(**kw, **inkwargs)
-             continue
-         inkwargs["target_options"] = dict(**kw, **inkwargs)
-         inkwargs["target_protocol"] = protocol
-         inkwargs["fo"] = urls
-     urlpath, protocol, _ = chain[0]
-     fs = filesystem(protocol, **inkwargs)
-     return fs, urlpath
- 
- 
- def open(
-     urlpath,
-     mode="rb",
-     compression=None,
-     encoding="utf8",
-     errors=None,
-     protocol=None,
-     newline=None,
-     **kwargs,
- ):
-     """Given a path or paths, return one ``OpenFile`` object.
- 
-     Parameters
-     ----------
-     urlpath: string or list
-         Absolute or relative filepath. Prefix with a protocol like ``s3://``
-         to read from alternative filesystems. Should not include glob
-         character(s).
-     mode: 'rb', 'wt', etc.
-     compression: string or None
-         If given, open file using compression codec. Can either be a compression
-         name (a key in ``fsspec.compression.compr``) or "infer" to guess the
-         compression from the filename suffix.
-     encoding: str
-         For text mode only
-     errors: None or str
-         Passed to TextIOWrapper in text mode
-     protocol: str or None
-         If given, overrides the protocol found in the URL.
-     newline: bytes or None
-         Used for line terminator in text mode. If None, uses system default;
-         if blank, uses no translation.
-     **kwargs: dict
-         Extra options that make sense to a particular storage connection, e.g.
-         host, port, username, password, etc.
- 
-     Examples
-     --------
-     >>> openfile = open('2015-01-01.csv')  # doctest: +SKIP
-     >>> openfile = open(
-     ...     's3://bucket/2015-01-01.csv.gz', compression='gzip'
-     ... )  # doctest: +SKIP
-     >>> with openfile as f:
-     ...     df = pd.read_csv(f)  # doctest: +SKIP
-     ...
- 
-     Returns
-     -------
-     ``OpenFile`` object.
- 
-     Notes
-     -----
-     For a full list of the available protocols and the implementations that
-     they map across to see the latest online documentation:
- 
-     - For implementations built into ``fsspec`` see
-       https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
-     - For implementations in separate packages see
-       https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
-     """
-     out = open_files(
-         urlpath=[urlpath],
-         mode=mode,
-         compression=compression,
-         encoding=encoding,
-         errors=errors,
-         protocol=protocol,
-         newline=newline,
-         expand=False,
-         **kwargs,
-     )
-     if not out:
-         raise FileNotFoundError(urlpath)
-     return out[0]
- 
- 
- def open_local(url, mode="rb", **storage_options):
-     """Open file(s) which can be resolved to local
- 
-     For files which either are local, or get downloaded upon open
-     (e.g., by file caching)
- 
-     Parameters
-     ----------
-     url: str or list(str)
-     mode: str
-         Must be read mode
-     storage_options:
-         passed on to FS for or used by open_files (e.g., compression)
-     """
-     if "r" not in mode:
-         raise ValueError("Can only ensure local files when reading")
-     of = open_files(url, mode=mode, **storage_options)
-     if not getattr(of[0].fs, "local_file", False):
-         raise ValueError(
-             "open_local can only be used on a filesystem which"
-             " has attribute local_file=True"
-         )
-     with of as files:
-         paths = [f.name for f in files]
-     if isinstance(url, str) and not has_magic(url):
-         return paths[0]
-     return paths
- 
- 
- def get_compression(urlpath, compression):
-     if compression == "infer":
-         compression = infer_compression(urlpath)
-     if compression is not None and compression not in compr:
-         raise ValueError("Compression type %s not supported" % compression)
-     return compression
- 
- 
- def split_protocol(urlpath):
-     """Return protocol, path pair"""
-     urlpath = stringify_path(urlpath)
-     if "://" in urlpath:
-         protocol, path = urlpath.split("://", 1)
-         if len(protocol) > 1:
-             # excludes Windows paths
-             return protocol, path
-     return None, urlpath
- 
- 
- def strip_protocol(urlpath):
-     """Return only path part of full URL, according to appropriate backend"""
-     protocol, _ = split_protocol(urlpath)
-     cls = get_filesystem_class(protocol)
-     return cls._strip_protocol(urlpath)
- 
- 
- def expand_paths_if_needed(paths, mode, num, fs, name_function):
-     """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
-     in them (read mode).
- 
-     :param paths: list of paths
-     mode: str
-         Mode in which to open files.
-     num: int
-         If opening in writing mode, number of files we expect to create.
-     fs: filesystem object
-     name_function: callable
-         If opening in writing mode, this callable is used to generate path
-         names. Names are generated for each partition by
-         ``urlpath.replace('*', name_function(partition_index))``.
-     :return: list of paths
-     """
-     expanded_paths = []
-     paths = list(paths)
- 
-     if "w" in mode:  # write mode
-         if sum([1 for p in paths if "*" in p]) > 1:
-             raise ValueError(
-                 "When writing data, only one filename mask can be specified."
-             )
-         num = max(num, len(paths))
- 
-         for curr_path in paths:
-             if "*" in curr_path:
-                 # expand using name_function
-                 expanded_paths.extend(_expand_paths(curr_path, name_function, num))
-             else:
-                 expanded_paths.append(curr_path)
-         # if we generated more paths than asked for, trim the list
-         if len(expanded_paths) > num:
-             expanded_paths = expanded_paths[:num]
- 
-     else:  # read mode
-         for curr_path in paths:
-             if has_magic(curr_path):
-                 # expand using glob
-                 expanded_paths.extend(fs.glob(curr_path))
-             else:
-                 expanded_paths.append(curr_path)
- 
-     return expanded_paths
- 
- 
- def get_fs_token_paths(
-     urlpath,
-     mode="rb",
-     num=1,
-     name_function=None,
-     storage_options=None,
-     protocol=None,
-     expand=True,
- ):
-     """Filesystem, deterministic token, and paths from a urlpath and options.
- 
-     Parameters
-     ----------
-     urlpath: string or iterable
-         Absolute or relative filepath, URL (may include protocols like
-         ``s3://``), or globstring pointing to data.
-     mode: str, optional
-         Mode in which to open files.
-     num: int, optional
-         If opening in writing mode, number of files we expect to create.
-     name_function: callable, optional
-         If opening in writing mode, this callable is used to generate path
-         names. Names are generated for each partition by
-         ``urlpath.replace('*', name_function(partition_index))``.
-     storage_options: dict, optional
-         Additional keywords to pass to the filesystem class.
-     protocol: str or None
-         To override the protocol specifier in the URL
-     expand: bool
-         Expand string paths for writing, assuming the path is a directory
-     """
-     if isinstance(urlpath, (list, tuple, set)):
-         if not urlpath:
-             raise ValueError("empty urlpath sequence")
-         urlpath0 = stringify_path(list(urlpath)[0])
-     else:
-         urlpath0 = stringify_path(urlpath)
-     storage_options = storage_options or {}
-     if protocol:
-         storage_options["protocol"] = protocol
-     chain = _un_chain(urlpath0, storage_options or {})
-     inkwargs = {}
-     # Reverse iterate the chain, creating a nested target_* structure
-     for i, ch in enumerate(reversed(chain)):
-         urls, nested_protocol, kw = ch
-         if i == len(chain) - 1:
-             inkwargs = dict(**kw, **inkwargs)
-             continue
-         inkwargs["target_options"] = dict(**kw, **inkwargs)
-         inkwargs["target_protocol"] = nested_protocol
-         inkwargs["fo"] = urls
-     paths, protocol, _ = chain[0]
-     fs = filesystem(protocol, **inkwargs)
-     if isinstance(urlpath, (list, tuple, set)):
-         pchains = [
-             _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
-         ]
-         if len(set(pc[1] for pc in pchains)) > 1:
-             raise ValueError("Protocol mismatch getting fs from %s", urlpath)
-         paths = [pc[0] for pc in pchains]
-     else:
-         paths = fs._strip_protocol(paths)
-     if isinstance(paths, (list, tuple, set)):
-         paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
-     else:
-         if "w" in mode and expand:
-             paths = _expand_paths(paths, name_function, num)
-         elif "*" in paths:
-             paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
-         else:
-             paths = [paths]
- 
-     return fs, fs._fs_token, paths
- 
- 
- def _expand_paths(path, name_function, num):
-     if isinstance(path, str):
-         if path.count("*") > 1:
-             raise ValueError("Output path spec must contain exactly one '*'.")
-         elif "*" not in path:
-             path = os.path.join(path, "*.part")
- 
-         if name_function is None:
-             name_function = build_name_function(num - 1)
- 
-         paths = [path.replace("*", name_function(i)) for i in range(num)]
-         if paths != sorted(paths):
-             logger.warning(
-                 "In order to preserve order between partitions"
-                 " paths created with ``name_function`` should "
-                 "sort to partition order"
-             )
-     elif isinstance(path, (tuple, list)):
-         assert len(path) == num
-         paths = list(path)
-     else:
-         raise ValueError(
-             "Path should be either\n"
-             "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
-             "2. A directory: 'foo/'\n"
-             "3. A path with a '*' in it: 'foo.*.json'"
-         )
-     return paths
- 
- 
- class PickleableTextIOWrapper(io.TextIOWrapper):
-     """TextIOWrapper cannot be pickled. This solves it.
- 
-     Requires that ``buffer`` be pickleable, which all instances of
-     AbstractBufferedFile are.
-     """
- 
-     def __init__(
-         self,
-         buffer,
-         encoding=None,
-         errors=None,
-         newline=None,
-         line_buffering=False,
-         write_through=False,
-     ):
-         self.args = buffer, encoding, errors, newline, line_buffering, write_through
-         super().__init__(*self.args)
- 
-     def __reduce__(self):
-         return PickleableTextIOWrapper, self.args
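
For context, a short sketch of the entry points defined above as they are typically used; the paths and URL are placeholders:

import fsspec  # fsspec.open and fsspec.open_files re-export the functions above
from fsspec.core import url_to_fs

# Nothing is opened until the context is entered; compression is inferred
# from the ".gz" suffix.
of = fsspec.open("2015-01-01.csv.gz", mode="rt", compression="infer")
with of as f:
    header = f.readline()

# A glob expands to many files that can be used as a single context.
files = fsspec.open_files("2015-*-*.csv", mode="rt")
with files as handles:
    first_lines = [h.readline() for h in handles]

# Resolve a URL (possibly chained with "::") to a filesystem plus path.
fs, path = url_to_fs("simplecache::https://example.com/data.csv")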
 
spaces/Dabs/Floyd-Steinberg-Dithering/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- title: Floyd Steinberg Dithering
- emoji: 🐠
- colorFrom: green
- colorTo: blue
- sdk: gradio
- app_file: app.py
- pinned: false
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/DaleChen/AutoGPT/tests/unit/test_commands.py DELETED
@@ -1,22 +0,0 @@
- """Unit tests for the commands module"""
- from unittest.mock import MagicMock, patch
- 
- import pytest
- 
- import autogpt.agent.agent_manager as agent_manager
- from autogpt.app import execute_command, list_agents, start_agent
- 
- 
- @pytest.mark.integration_test
- def test_make_agent() -> None:
-     """Test the make_agent command"""
-     with patch("openai.ChatCompletion.create") as mock:
-         obj = MagicMock()
-         obj.response.choices[0].messages[0].content = "Test message"
-         mock.return_value = obj
-         start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
-         agents = list_agents()
-         assert "List of agents:\n0: chat" == agents
-         start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
-         agents = list_agents()
-         assert "List of agents:\n0: chat\n1: write" == agents
 
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/dataset/communal/__init__.py DELETED
@@ -1,4 +0,0 @@
- """
- @Date: 2021/09/22
- @description:
- """
 
spaces/DimaKoshman/MovieRecommender/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: MovieRecommender
- emoji: 🌖
- colorFrom: gray
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.8.1
- app_file: app.py
- pinned: false
- license: mit
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DragGan/DragGan-Inversion/PTI/training/coaches/multi_id_coach.py DELETED
@@ -1,73 +0,0 @@
- import os
- import sys
- sys.path.append('.')
- import torch
- from tqdm import tqdm
- 
- from PTI.configs import paths_config, hyperparameters, global_config
- from PTI.training.coaches.base_coach import BaseCoach
- from PTI.utils.log_utils import log_images_from_w
- 
- 
- class MultiIDCoach(BaseCoach):
- 
-     def __init__(self, data_loader, use_wandb):
-         super().__init__(data_loader, use_wandb)
- 
-     def train(self):
-         self.G.synthesis.train()
-         self.G.mapping.train()
- 
-         w_path_dir = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}'
-         os.makedirs(w_path_dir, exist_ok=True)
-         os.makedirs(f'{w_path_dir}/{paths_config.pti_results_keyword}', exist_ok=True)
- 
-         use_ball_holder = True
-         w_pivots = []
-         images = []
- 
-         for fname, image in self.data_loader:
-             if self.image_counter >= hyperparameters.max_images_to_invert:
-                 break
- 
-             image_name = fname[0]
-             if hyperparameters.first_inv_type == 'w+':
-                 embedding_dir = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}'
-             else:
-                 embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
-             os.makedirs(embedding_dir, exist_ok=True)
- 
-             w_pivot = self.get_inversion(w_path_dir, image_name, image)
-             w_pivots.append(w_pivot)
-             images.append((image_name, image))
-             self.image_counter += 1
- 
-         for i in tqdm(range(hyperparameters.max_pti_steps)):
-             self.image_counter = 0
- 
-             for data, w_pivot in zip(images, w_pivots):
-                 image_name, image = data
- 
-                 if self.image_counter >= hyperparameters.max_images_to_invert:
-                     break
- 
-                 real_images_batch = image.to(global_config.device)
- 
-                 generated_images = self.forward(w_pivot)
-                 loss, l2_loss_val, loss_lpips = self.calc_loss(generated_images, real_images_batch, image_name,
-                                                                self.G, use_ball_holder, w_pivot)
- 
-                 self.optimizer.zero_grad()
-                 loss.backward()
-                 self.optimizer.step()
- 
-                 use_ball_holder = global_config.training_step % hyperparameters.locality_regularization_interval == 0
- 
-                 global_config.training_step += 1
-                 self.image_counter += 1
- 
-         if self.use_wandb:
-             log_images_from_w(w_pivots, self.G, [image[0] for image in images])
- 
-         torch.save(self.G,
-                    f'{paths_config.checkpoints_dir}/model_{global_config.run_name}_multi_id.pt')
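
For context, a sketch of how this coach is typically driven; the dataset wiring is an assumption (my_image_dataset is hypothetical), and PTI's own run script builds something similar:

# Assumes the PTI package above is importable and a dataset yielding
# (name, image_tensor) pairs exists.
from torch.utils.data import DataLoader
from PTI.training.coaches.multi_id_coach import MultiIDCoach

dataloader = DataLoader(my_image_dataset, batch_size=1, shuffle=False)  # hypothetical dataset
coach = MultiIDCoach(dataloader, use_wandb=False)
coach.train()  # inverts each image to a pivot w, then tunes G on all pivots jointly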