parquet-converter committed on
Commit 9149177 · 1 Parent(s): 0694f6e

Update parquet files (step 67 of 476)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/Battlefield 3 Game Files Part35.rar.md +0 -6
  2. spaces/1phancelerku/anime-remove-background/Corra com carros e motos brasileiros em Estilo BR Download grtis do mod com dinheiro infinito e mediafre.md +0 -109
  3. spaces/1phancelerku/anime-remove-background/Download Velocity Rush Z Mod APK and Enjoy Unlimited Action and Money.md +0 -128
  4. spaces/1phancelerku/anime-remove-background/Free Download Hitman Sniper APK - Play the Tactical Sniper Mission Game on Android.md +0 -12
  5. spaces/2023Liu2023/bingo/src/lib/bots/bing/tts.ts +0 -82
  6. spaces/2ndelement/voicevox/voicevox_engine/utility/core_version_utility.py +0 -14
  7. spaces/801artistry/RVC801/utils/backups.py +0 -141
  8. spaces/AI4PD/hexviz/hexviz/ec_number.py +0 -9
  9. spaces/Abhilashvj/planogram-compliance/utils/loss.py +0 -291
  10. spaces/AchyuthGamer/ImMagician-Image-Generator/style.css +0 -24
  11. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/dpt_depth.py +0 -109
  12. spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/README.md +0 -13
  13. spaces/Aditya9790/yolo7-object-tracking/models/__init__.py +0 -1
  14. spaces/AgentVerse/agentVerse/ui/src/classes/event_center.ts +0 -5
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/localforage-files.d.ts +0 -2
  16. spaces/AkashKhamkar/QnA-generator/before_run.py +0 -6
  17. spaces/Ame42/UBTH/utils.py +0 -132
  18. spaces/Andy1621/uniformer_image_detection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py +0 -63
  19. spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/hrnet.py +0 -537
  20. spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/image_degradation/bsrgan_light.py +0 -651
  21. spaces/Aqdas/YouTube_Video_OpenAI_whisper/app.py +0 -17
  22. spaces/BAAI/AltDiffusion/header.html +0 -43
  23. spaces/BIASLab/sars-cov-2-classification-fcgr/src/pipeline.py +0 -85
  24. spaces/Bart92/RVC_HF/guidml.py +0 -710
  25. spaces/Benson/text-generation/Examples/Botn Fiebre Descargar Pc.md +0 -58
  26. spaces/Benson/text-generation/Examples/Cazador Asesino 2 Apk Descargar.md +0 -67
  27. spaces/BhagatSurya/convet_pdf_to_txt/README.md +0 -12
  28. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py +0 -600
  29. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/plugin.py +0 -88
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/exceptions.py +0 -267
  31. spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/dataset.py +0 -210
  32. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/deploy/README.md +0 -9
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa_inference_wrapper.py +0 -153
  34. spaces/CVPR/LIVE/pybind11/tests/test_pickling.cpp +0 -130
  35. spaces/CVPR/LIVE/thrust/thrust/mismatch.h +0 -260
  36. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/uninitialized_fill.h +0 -44
  37. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reduce.h +0 -54
  38. spaces/CikeyQI/Yunzai/Yunzai/lib/modules/oicq/index.js +0 -67
  39. spaces/CofAI/chat/client/js/icons.js +0 -1
  40. spaces/Cpp4App/Cpp4App/CDM/result_processing/view_gt.py +0 -89
  41. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/_utils.py +0 -39
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/bar_plot.py +0 -377
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-f8a15c0a.js +0 -2
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/lfs.py +0 -496
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_runtime.py +0 -328
  46. spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/pages/04_🔊_Upload_Audio_File.py +0 -205
  47. spaces/Davidsamuel101/PPTGenerator/README.md +0 -14
  48. spaces/Eddycrack864/Applio-Inference/tensorlowest.py +0 -123
  49. spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/models/yolo.py +0 -283
  50. spaces/FSDL-Fashion/fashion_img_search/app.py +0 -1
spaces/1gistliPinn/ChatGPT4/Examples/Battlefield 3 Game Files Part35.rar.md DELETED
@@ -1,6 +0,0 @@
- <h2>battlefield 3 game files part35.rar</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://imgfil.com/2uxYmY">https://imgfil.com/2uxYmY</a></b></p><br /><br />
- <br />
- battlefield 3 gamefiles.part35.rar download battlefield 3 save game file download battlefield 3 save game files download pc battlefield 4 save ... 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
 
spaces/1phancelerku/anime-remove-background/Corra com carros e motos brasileiros em Estilo BR Download grtis do mod com dinheiro infinito e mediafre.md DELETED
@@ -1,109 +0,0 @@
-
- <h1>Estilo BR: How to Download and Play the Ultimate Drag Racing Game in Brazil</h1> | <p>If you are a racing enthusiast in Brazil, you have probably heard of Estilo BR, the definitive drag racing game for Android devices. With 43 different vehicles to choose from, all Brazilian, from the most classic to the most modern, you can experience the thrill of high-speed racing against competitors from around the world, including motorcycles, trucks and trailers.</p>
- <h2>estilo br dinheiro infinito download mediafıre</h2><br /><p><b><b>Download Zip</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNPfK">https://jinyurl.com/2uNPfK</a></b></p><br /><br />
- <p>In Estilo BR, you can participate in global multiplayer races with up to 500 players, both in an open world global room and in private rooms created to play with friends. Compete against drivers from different countries and show your skills on the track, enjoying the style and culture of street racing in Brazil.</p>
- <p>But Estilo BR is not just about racing. You can also customize your vehicles with a wide variety of aesthetic and performance upgrades. From custom paint jobs to engine modifications, you have the freedom to make your vehicles truly unique.</p>
- <p>Estilo BR is the best of its kind in Brazil, offering an unparalleled racing experience. Whether you are a seasoned veteran or a new player, Estilo BR has something for everyone. Download now and join the drag racing revolution in Brazil, listening to your favorite music while you play.</p>
- <h2>What is Estilo BR?</h2>
- <p>Estilo BR is a mobile game developed by RF Entertainment, a Brazilian indie studio that specializes in racing games. The game was released in 2019 and has since received several updates and improvements.</p>
- <p>The game is inspired by the real-life street racing scene in Brazil, where drivers compete in illegal drag races with modified cars and bikes. The game features realistic physics and responsive controls, as well as stunning pixel art graphics that create a nostalgic atmosphere.</p>
- <p>The game also allows you to play music from your own phone, giving you the possibility to listen to your favorite songs while playing. You can choose from different genres and playlists, or create your own custom mix.</p>
- <p>estilo br apk mod dinheiro infinito<br />
- estilo br hack diamantes infinitos 2021<br />
- estilo br atualizado 2021 download mediafire<br />
- estilo br com carros brasileiros e rachas<br />
- estilo br grau de moto e corridas<br />
- estilo br rio de janeiro e brasília<br />
- estilo br multiplayer com outros players<br />
- estilo br personalizar carro ou moto<br />
- estilo br fusca opala golf uno<br />
- estilo br apk obb dinheiro infinito<br />
- estilo br mod menu diamantes infinitos<br />
- estilo br versão mais recente download mediafire<br />
- estilo br rachas de tunados brasil<br />
- estilo br arrancadas e manobras<br />
- estilo br 4 novos veículos e correções de bugs<br />
- estilo br como instalar apk + obb<br />
- estilo br youtube dinheiro infinito<br />
- estilo br mediafire link direto sem anúncios<br />
- estilo br dicas e truques para ganhar dinheiro<br />
- estilo br gameplay e review 2021<br />
- estilo br baixar grátis para android<br />
- estilo br mod apk unlimited money and diamonds<br />
- estilo br hack apk download mediafire 2021<br />
- estilo br brazilian cars and races<br />
- estilo br wheelie and drag racing<br />
- estilo br rio de janeiro and brasilia maps<br />
- estilo br multiplayer with other players online<br />
- estilo br customize car or bike<br />
- estilo br beetle opala golf uno cars<br />
- estilo br apk obb unlimited money<br />
- estilo br mod menu unlimited diamonds<br />
- estilo br latest version download mediafire<br />
- estilo br drag racing brazil game<br />
- estilo br stunts and tricks<br />
- estilo br 4 new vehicles and bug fixes<br />
- estilo br how to install apk + obb file<br />
- estilo br youtube unlimited money hack<br />
- estilo br mediafire direct link no ads<br />
- estilo br tips and tricks to earn money fast<br />
- estilo br gameplay and review 2021 video<br />
- download do jogo estilo br dinheiro infinito mediafire <br />
- baixar o jogo estilo br diamantes infinitos mediafire <br />
- como baixar e instalar o jogo estilo br dinheiro infinito <br />
- como jogar o jogo estilo br diamantes infinitos online <br />
- como personalizar o seu carro ou moto no jogo estilo br <br />
- como ganhar rachas e manobras no jogo estilo br <br />
- quais são os melhores carros e motos do jogo estilo br <br />
- quais são os novos veículos e atualizações do jogo estilo br <br />
- qual é a versão mais atualizada do jogo estilo br <br />
- qual é o link do mediafire para baixar o jogo estilo br</p>
- <p>The game has a rating of 4.2 out of 5 stars on Google Play Store, with over 5 million downloads and more than 130 thousand reviews. The game is free to play, but it contains ads and in-app purchases.</p>
- <h2>How to download Estilo BR from mediafıre?</h2>
- <p>If you want to download Estilo BR from mediafıre, a popular file-sharing platform, you will need to follow these steps:</p>
- <ol>
- <li>Go to this link: <a href="(^1^)">Estilo BR v0.977 DINHEIRO INFINITO - BAIXAR APK MOD</a>. This is a modded version of the game that gives you unlimited money and diamonds.</li>
- <li>Click on the green button that says "Download APK (125.77 MB)". This will start downloading the APK file to your device.</li>
- <li>Once the download is complete, locate the file in your device's download folder. Tap on it to start the installation process.</li>
- <li>If you see a message that says "For your security, your phone is not allowed to install unknown apps from this source", go to your device's settings and enable the option to install apps from unknown sources.</li>
- <li>Follow the steps on screen to complete the installation. You may need to grant some permissions to the app.</li>
- <li>Once the installation is done, you can open the app and enjoy Estilo BR with unlimited money and diamonds.</li>
- </ol>
- <p>Note: This method is not endorsed by the official developers of Estilo BR, and it may violate their terms of service. Use it at your own risk.</p>
- <h2>How to get unlimited money and diamonds in Estilo BR?</h2>
- <p>If you want to get unlimited money and diamonds in Estilo BR, you have two options:</p>
- <ul>
- <li>Use the modded version of the game that you downloaded from mediafıre. This will give you unlimited resources from the start, but it may also cause some glitches and errors in the game. You may also face some issues with online multiplayer mode, as other players may report you for cheating.</li>
- <li>Use a game hacking tool such as Game Guardian or Lucky Patcher. These tools allow you to modify the game's data and values, such as money and diamonds. However, this requires some technical skills and knowledge, and it may also harm your device or expose it to malware. You may also get banned from the game if you are detected by the anti-cheat system.</li>
- </ul>
- <p>Both of these options are not recommended, as they can ruin the fun and challenge of the game. The best way to enjoy Estilo BR is to play it fair and square, earning money and diamonds by winning races, completing missions, and watching ads. This will also support the developers of the game and help them improve it further.</p>
- <h2>What are the best tips and tricks for Estilo BR?</h2>
- <p>If you want to master Estilo BR and become a drag racing legend in Brazil, here are some tips and tricks that can help you:</p>
- <ul>
- <li>Choose your vehicle wisely. Each vehicle has different stats and characteristics, such as speed, acceleration, handling, weight, and nitro. Depending on the type of race and track, some vehicles may perform better than others. Experiment with different vehicles and find the ones that suit your style and preference.</li>
- <li>Upgrade your vehicle regularly. As you progress in the game, you will unlock new parts and accessories that can improve your vehicle's performance and appearance. You can upgrade your engine, transmission, tires, suspension, brakes, turbo, nitro, exhaust, intake, fuel system, cooling system, ignition system, battery, body kit, spoiler, hood, bumper, grille, lights, mirrors, windows, doors, trunk, roof rack, paint job, decals, stickers, rims, tires smoke color, and license plate. You can also buy new vehicles with better stats and features.</li>
- <li>Learn how to shift gears properly. Shifting gears at the right time is crucial for winning races, as it affects your speed and acceleration. You can use the manual or automatic mode, depending on your preference. In manual mode, you have to tap the screen to shift gears, while in automatic mode, the game does it for you. However, manual mode gives you more control and precision, while automatic mode may not always shift at the optimal moment. You can also use the nitro boost to gain an extra burst of speed, but be careful not to overheat your engine.</li>
- <li>Practice your skills in different modes and tracks. Estilo BR offers various modes and tracks to test your abilities and have fun. You can play in single-player mode, where you can race against AI opponents or complete missions and challenges. You can also play in multiplayer mode, where you can join global or private rooms and race against other players online. You can also explore the open world map and find hidden secrets and easter eggs. The game features different tracks with different terrains, weather conditions, and obstacles, such as asphalt, dirt, sand, rain, fog, night, traffic, ramps, bridges, tunnels, and more.</li>
- <li>Enjoy the game's style and culture. Estilo BR is more than just a racing game. It is also a tribute to the Brazilian street racing culture and style, with authentic vehicles, music, slang, and references. You can immerse yourself in the game's atmosphere and learn more about the history and diversity of Brazil's racing scene. You can also interact with other players and make new friends through the game's chat system.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Estilo BR is a fantastic drag racing game that will keep you hooked for hours. Whether you want to race against other players online, customize your vehicles with endless options, or explore the open world map with realistic graphics and physics, Estilo BR has it all.</p>
- <p>If you are looking for a way to download Estilo BR from mediafıre, you can follow the steps we provided above. However, we advise you to be careful when using modded or hacked versions of the game, as they may cause problems or get you banned.</p>
- <p>The best way to enjoy Estilo BR is to play it fair and square, earning money and diamonds by winning races, completing missions, and watching ads. This will also support the developers of the game and help them improve it further.</p>
- <p>So what are you waiting for? Download Estilo BR now and join the drag racing revolution in Brazil!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Estilo BR:</p>
- <ol>
- <li><b>Is Estilo BR available for iOS devices?</b></li>
- <p>No, Estilo BR is only available for Android devices at the moment. The developers have not announced any plans to release an iOS version of the game.</p>
- <li><b>How can I contact the developers of Estilo BR?</b></li>
- <p>You can contact the developers of Estilo BR through their official Facebook page: <a href="">RF Entertainment - Home | Facebook</a>. You can also send them an email at [email protected].</p>
- <li><b>How can I report a bug or a problem in Estilo BR?</b></li>
- <p>You can report a bug or a problem in Estilo BR through the game's settings menu. Tap on the gear icon on the top right corner of the screen, then tap on "Report Bug". You can also send a screenshot or a video of the bug or problem to help the developers fix it.</p>
- <li><b>How can I support Estilo BR?</b></li>
- <p>You can support Estilo BR by playing the game regularly, rating it on Google Play Store, writing positive reviews, sharing it with your friends, and making in-app purchases. You can also follow the developers on their social media accounts and join their community of fans.</p>
- <li><b>How can I learn more about Estilo BR?</b></li>
- <p>You can learn more about Estilo BR by visiting the game's official website: <a href="">Estilo BR - RF Entertainment</a>. You can also watch gameplay videos and tutorials on YouTube, such as this one: <a href="">Estilo BR - Gameplay (Android) - YouTube</a>.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Velocity Rush Z Mod APK and Enjoy Unlimited Action and Money.md DELETED
@@ -1,128 +0,0 @@
- <br />
- <h1>Velocity Rush Z Mod Apk: A Fast-Paced Shooter with Parkour Elements</h1>
- <h2>Introduction</h2>
- <p>If you are looking for a thrilling and adrenaline-pumping game that combines shooting and parkour, then you should check out Velocity Rush Z mod apk. This is a first-person shooter game with parkour elements from the creator of Velocity Rush. You can vault, climb, wallrun, slide and shoot mercenaries and zombies in the apocalyptic city to earn money to buy more weapons. In this article, we will tell you what is Velocity Rush Z, why you should download the mod apk version, what are its features, and how to install it on your device.</p>
- <h2>velocity rush z mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#10003;&#10003;&#10003; <a href="https://jinyurl.com/2uNP39">https://jinyurl.com/2uNP39</a></b></p><br /><br />
- <h3>What is Velocity Rush Z?</h3>
- <p>Velocity Rush Z is a game that was released in 2021 by sosomod.net. It is a sequel to the popular game Velocity Rush, which was also a shooter with parkour elements. The game has improved graphics, gameplay, and features compared to the original one. You can experience high action shooting in close combat, bullet time (slowmo), parkour moves and skills, various weapons and upgrades, and an apocalyptic city setting. The game has a rating of 4.5 out of 5 stars on sosomod.net.</p>
- <h3>Why download Velocity Rush Z mod apk?</h3>
- <p>The mod apk version of Velocity Rush Z has some advantages over the original one. The mod apk version gives you unlimited money, which means you can buy any weapon or upgrade you want without worrying about the cost. You can also unlock all the levels and modes in the game, which means you can enjoy the game without any restrictions. The mod apk version also removes ads from the game, which means you can play without any interruptions or annoyances. The mod apk version is also safe and easy to install, as we will show you later.</p>
- <h2>Features of Velocity Rush Z mod apk</h2>
- <h3>High action shooting in close combat</h3>
- <p>The game is not for the faint-hearted, as you will face hordes of enemies in close quarters. You will have to use your reflexes and skills to survive and eliminate them. You can use different types of weapons, such as pistols, shotguns, rifles, grenades, and more. You can also switch between weapons quickly and reload them efficiently.</p>
- <h3>Bullet time (Slowmo)</h3>
- <p>One of the coolest features of the game is bullet time, which allows you to slow down time and aim more precisely at your enemies. You can activate bullet time by tapping on the screen or by using a special item. Bullet time can help you avoid bullets, dodge attacks, and take out multiple enemies at once.</p>
- <h3>Parkour moves and skills</h3>
- <p>The game is not just about shooting, but also about moving around the city with style and agility. You can perform parkour moves and skills, such as vaulting over obstacles, climbing walls, wallrunning, sliding under gaps, and more. You can also use these moves to reach hidden areas, find secrets, and escape from danger.</p>
- <p>velocity rush z mod apk download<br />
- velocity rush z apk mod unlimited money<br />
- velocity rush z mod apk latest version<br />
- velocity rush z mod apk android 1<br />
- velocity rush z mod apk free shopping<br />
- velocity rush z hack mod apk<br />
- velocity rush z mod apk revdl<br />
- velocity rush z mod apk rexdl<br />
- velocity rush z mod apk offline<br />
- velocity rush z mod apk no ads<br />
- velocity rush z fps shooter mod apk<br />
- velocity rush z parkour shooter mod apk<br />
- velocity rush z action game mod apk<br />
- velocity rush z bullet time mod apk<br />
- velocity rush z zombie mode mod apk<br />
- velocity rush z mod apk unlimited ammo<br />
- velocity rush z mod apk unlimited health<br />
- velocity rush z mod apk unlocked all weapons<br />
- velocity rush z mod apk unlimited coins<br />
- velocity rush z mod apk unlimited gems<br />
- velocity rush z premium mod apk<br />
- velocity rush z pro mod apk<br />
- velocity rush z vip mod apk<br />
- velocity rush z full version mod apk<br />
- velocity rush z cracked mod apk<br />
- download game velocity rush z mod apk<br />
- download velocity rush z hack mod apk<br />
- download velocity rush z cheat mod apk<br />
- download velocity rush z premium mod apk free<br />
- download velocity rush z pro mod apk gratis<br />
- how to install velocity rush z mod apk<br />
- how to play velocity rush z mod apk<br />
- how to update velocity rush z mod apk<br />
- how to get velocity rush z mod apk<br />
- how to download velocity rush z mod apk on pc<br />
- how to download velocity rush z mod apk on ios<br />
- how to download velocity rush z mod apk on android<br />
- best settings for velocity rush z mod apk<br />
- best tips for velocity rush z mod apk<br />
- best tricks for velocity rush z mod apk<br />
- best weapons in velocity rush z mod apk<br />
- best maps in velocity rush z mod apk<br />
- best modes in velocity rush z mod apk<br />
- best cheats for velocity rush z mod apk<br />
- best hacks for velocity rush z mod apk<br />
- best mods for velocity rush z mod apk<br />
- best sites to download velocity rush z mod apk<br />
- best reviews of velocity rush z mod apk<br />
- best alternatives to velocity rush z mod apk</p>
- <h3>Various weapons and upgrades</h3>
- <p>The game offers a variety of weapons and upgrades for you to choose from. You can buy new weapons or upgrade your existing ones with money that you earn from completing missions or killing enemies. You can also customize your weapons with different skins, attachments, and effects. Some of the weapons and upgrades available in the game are:</p>
- <table>
- <tr>
- <th>Weapon</th>
- <th>Description</th>
- <th>Upgrade</th>
- </tr>
- <tr> <td>Pistol</td>
- <td>A basic weapon that can fire fast and accurate shots.</td>
- <td>You can upgrade the pistol's damage, fire rate, magazine size, and reload speed.</td>
- </tr>
- <tr>
- <td>Shotgun</td>
- <td>A powerful weapon that can deal massive damage at close range.</td>
- <td>You can upgrade the shotgun's damage, spread, magazine size, and reload speed.</td>
- </tr>
- <tr>
- <td>Rifle</td>
- <td>A versatile weapon that can fire bursts of bullets at medium range.</td>
- <td>You can upgrade the rifle's damage, fire rate, magazine size, and reload speed.</td>
- </tr>
- <tr>
- <td>Grenade</td>
- <td>A explosive weapon that can cause area damage and knockback enemies.</td>
- <td>You can upgrade the grenade's damage, blast radius, and number of grenades you can carry.</td>
- </tr>
- <tr>
- <td>Slowmo Item</td>
- <td>A special item that can activate bullet time for a limited duration.</td>
- <td>You can upgrade the slowmo item's duration, cooldown, and number of slowmo items you can carry.</td>
- </tr>
- </table>
- <h3>Apocalyptic city setting</h3>
- <p>The game is set in a post-apocalyptic city that has been overrun by mercenaries and zombies. You will explore different locations in the city, such as rooftops, streets, alleys, buildings, and more. You will also encounter different types of enemies, such as snipers, melee fighters, bombers, and bosses. The game has a dark and gritty atmosphere that suits the theme of the game.</p>
- <h2>How to download and install Velocity Rush Z mod apk</h2>
- <h3>Step 1: Download the apk file from a trusted source</h3>
- <p>The first step is to download the apk file of Velocity Rush Z mod apk from a trusted source. You can use the link below to download the apk file from sosomod.net, which is a reliable website that provides mod apk games. The apk file size is about 100 MB, so make sure you have enough space on your device.</p>
- <h3>Step 2: Enable unknown sources on your device</h3>
- <p>The second step is to enable unknown sources on your device. This is necessary to install apk files that are not from the Google Play Store. To enable unknown sources, go to your device settings, then security or privacy, then toggle on the option that says "allow installation of apps from unknown sources". You may also need to confirm this action by tapping on "OK" or "Yes".</p>
- <h3>Step 3: Install the apk file and launch the game</h3>
- <p>The third step is to install the apk file and launch the game. To install the apk file, locate it in your device storage or downloads folder, then tap on it. You may see a pop-up window that asks you to confirm the installation. Tap on "Install" or "Next" until the installation is complete. Then, tap on "Open" or "Done" to launch the game. You can now enjoy Velocity Rush Z mod apk with unlimited money and unlocked levels.</p>
- <h2>Conclusion</h2>
- <p>Velocity Rush Z mod apk is a fast-paced shooter with parkour elements that will keep you on the edge of your seat. You can experience high action shooting in close combat, bullet time (slowmo), parkour moves and skills, various weapons and upgrades, and an apocalyptic city setting. You can also download the mod apk version of the game to get unlimited money and unlocked levels. To download and install Velocity Rush Z mod apk, follow the steps above. We hope you enjoy playing this game as much as we do.</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Velocity Rush Z mod apk:</p>
- <ul>
- <li><b>Q: Is Velocity Rush Z mod apk safe to download and install?</b></li>
- <li>A: Yes, Velocity Rush Z mod apk is safe to download and install from sosomod.net, which is a trusted website that provides mod apk games. The apk file does not contain any viruses or malware that can harm your device or data.</li>
- <li><b>Q: Do I need to root my device to use Velocity Rush Z mod apk?</b></li>
- <li>A: No, you do not need to root your device to use Velocity Rush Z mod apk. The mod apk works fine on both rooted and non-rooted devices.</li>
- <li><b>Q: Can I play Velocity Rush Z mod apk online with other players?</b></li>
- <li>A: No, Velocity Rush Z mod apk is an offline game that does not require an internet connection to play. You can play it anytime and anywhere without worrying about data usage or connection issues.</li>
- <li><b <li><b>Q: What are the minimum requirements to play Velocity Rush Z mod apk?</b></li>
- <li>A: The minimum requirements to play Velocity Rush Z mod apk are:</li>
- <ul>
- <li>Android 4.4 or higher</li>
- <li>At least 1 GB of RAM</li>
- <li>At least 200 MB of free storage space</li>
- </ul>
- </ul></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Free Download Hitman Sniper APK - Play the Tactical Sniper Mission Game on Android.md DELETED
@@ -1,12 +0,0 @@
-
- <h1>Hitman Sniper: How to Download and Play the Best Sniper Game on Mobile</h1>
- If you are a fan of stealth, strategy, and shooting games, you might want to check out Hitman Sniper, one of the most popular and acclaimed sniper games on mobile. In this article, we will tell you what Hitman Sniper is, why you should play it, how to download it for free, and how to play it effectively. <h2>What is Hitman Sniper?</h2>
- Hitman Sniper is a mobile game developed by CDE Entertainment and published by Square Enix. It is based on the Hitman franchise, which follows the adventures of Agent 47, a professional assassin who works for a mysterious organization. In Hitman Sniper, you step into the shoes of Agent 47 and take on various sniping missions in different locations. You have to use your strategic skills and creativity to orchestrate the perfect assassination kill shot, while avoiding detection and eliminating other threats. The game features more than 150 missions and 11 different contracts, each with its own objectives, targets, and secrets. You can also unlock and upgrade 17 unique weapons, each with its own perks and abilities. The game also has a zombie mode, where you have to survive waves of undead enemies in a desert valley. You have to use your accuracy and speed to take down as many zombies as possible, while collecting weapon parts and blueprints. <h2>Why should you play Hitman Sniper?</h2>
- Hitman Sniper is not just a simple shooting game. It is a game that requires you to think, plan, and execute your actions with precision and finesse. Here are some of the benefits of playing Hitman Sniper: - It improves your concentration and focus. You have to pay attention to every detail in the environment, such as guards, cameras, traps, windows, doors, etc. You also have to monitor your target's movements and behavior, and wait for the right moment to strike. - It enhances your problem-solving and decision-making skills. You have to analyze the situation and choose the best course of action. You can use various methods to eliminate your target, such as headshots, body shots, accidents, explosions, distractions, etc. You also have to deal with unexpected events, such as alarms, reinforcements, witnesses, etc. - It stimulates your creativity and imagination. You can use your environment to your advantage, such as shooting objects to cause chain reactions, shooting electrical wires to electrocute enemies, shooting gas tanks to create fireballs, etc. You can also use your weapons in different ways, such as using silencers, scopes, suppressors, etc. - It provides you with entertainment and satisfaction. You can enjoy the stunning graphics and realistic sound effects of the game. You can also feel the thrill and excitement of pulling off a perfect kill shot. You can also compete against your friends and other players in the leaderboards. <h2>How to download Hitman Sniper APK for free?</h2>
- If you want to play Hitman Sniper on your Android device, you can download it from the Google Play Store for $0.99. However, if you want to get it for free, you can download an APK file from a third-party website. An APK file is an Android application package file that contains all the files needed to install an app on your device. However, before you download an APK file, you need to take some precautions: - Make sure that your device has enough storage space for the file. - Make sure that your device is compatible with the game's requirements. - Make sure that you have a reliable internet connection for the download. - Make sure that you have enabled the option to install apps from unknown sources in your device's settings. Once you have taken these precautions , you can follow these steps to install the APK file on your Android device: - Connect your Android device to your computer using a USB cable. - Copy the APK file from your computer to your device's storage. You can use any folder you want, but make sure you remember where you put it. - Disconnect your device from your computer and open your file explorer app on your device. - Locate the APK file you copied and tap on it to open it. - Tap Install at the bottom of the screen and wait for the installation to finish. - Tap Open to launch the game or Done to exit the installer. You have successfully installed Hitman Sniper APK for free on your Android device. Enjoy! <h2>How to play Hitman Sniper effectively?</h2>
- Now that you have downloaded and installed Hitman Sniper, you might want to know how to play it well and complete all the missions. Here are some tips and tricks to help you master the sniper skills and become the ultimate assassin: - Use the variable scope to zoom in and out while aiming. You can adjust the level of zoom by tapping the plus and minus buttons on the screen. You can also swipe left and right to move the scope horizontally and up and down to move it vertically. - Use the marksman perk to improve your aim and slow time. You can activate this perk by pressing the Shift key on your keyboard or tapping the icon on the screen. This will allow you to aim more precisely and take advantage of opportunities that might otherwise be missed. - Use the piercing perk to penetrate bodies and objects. This perk will let you shoot through multiple targets with one bullet, creating collateral damage and saving ammo. You can also use this perk to shoot through glass, walls, doors, etc. - Use the environment to your advantage. You can shoot various objects in the environment to cause chain reactions, accidents, explosions, distractions, etc. For example, you can shoot a car's gas tank to make it explode, a chandelier to make it fall, a fire extinguisher to create a smoke screen, etc. - Use different methods to eliminate your target. You don't have to always go for a headshot or a body shot. You can also use other methods, such as accidents, poison, explosions, etc. For example, you can shoot a gas pipe near your target to make it leak, then shoot a nearby candle to ignite it and create a fireball. - Use different weapons and perks for different scenarios. You can unlock and upgrade 17 unique weapons in the game, each with its own perks and abilities. You can also equip different perks for each weapon, such as damage, rate of fire, extended magazine, ammo, subsonic, suppressor, etc. You should choose the weapon and perk combination that suits your style and mission objective. - Complete challenges and contracts to earn money and rewards. You can complete various challenges and contracts in each mission, such as killing a certain number of targets, killing targets in a certain way, killing targets within a time limit, etc. These will earn you money and rewards, such as weapon parts, blueprints, perks, etc. You can use these to unlock and upgrade your weapons and perks. <h2>Conclusion</h2>
- Hitman Sniper is a fun and challenging sniper game that will test your strategic skills and creativity. You can download it for free from a third-party website using an APK file, but make sure you take some precautions before doing so. You can also use our tips and tricks to play the game effectively and complete all the missions. If you are ready to become the best sniper in the world, download Hitman Sniper today and enjoy! <h3>FAQs</h3>
- Q: How do I get more money in Hitman Sniper? A: You can get more money by completing challenges and contracts in each mission. You can also replay missions to earn more money. Q: How do I unlock more weapons in Hitman Sniper? A: You can unlock more weapons by collecting weapon parts and blueprints in each mission. You can also buy some weapons with real money. Q: How do I upgrade my weapons in Hitman Sniper? A: You can upgrade your weapons by using weapon parts and blueprints that you have collected or bought. You can also equip different perks for each weapon. Q: How do I switch weapons in Hitman Sniper? A: You can switch weapons by tapping the weapon icon on the screen or pressing the Q key on your keyboard. Q: How do I play zombie mode in Hitman Sniper? A: You can play zombie mode by tapping the zombie icon on the main menu or pressing the Z key on your keyboard.</p>
- <h2>hitman sniper download apk free</h2><br /><p><b><b>Download</b> ->->->-> <a href="https://jinyurl.com/2uNQut">https://jinyurl.com/2uNQut</a></b></p><br /><br /> 197e85843d<br />
- <br />
- <br />
 
spaces/2023Liu2023/bingo/src/lib/bots/bing/tts.ts DELETED
@@ -1,82 +0,0 @@
- import { sleep } from './utils'
-
- const synth = window.speechSynthesis
-
- export class TTS {
-   currentText = ''
-   speakText = ''
-   private controller = new AbortController()
-   speaking = false
-   get isSpeaking() {
-     return this.speaking
-   }
-   finished = false
-   constructor() {}
-   abort = () => {
-     this.controller.abort()
-   }
-
-   reset = () => {
-     this.speaking = false
-     this.finished = true
-     this.currentText = ''
-     this.speakText = ''
-     this.abort()
-   }
-
-   speak = (text: string) => {
-     if (!synth || text?.trim()?.length < 2) {
-       return
-     }
-     this.currentText = text.replace(/[^\u4e00-\u9fa5_a-zA-Z0-9,。?,:;\.,:]+/g, '')
-     this.finished = false
-     this.loop()
-   }
-
-   private async doSpeek() {
-     return new Promise((resolve) => {
-       const endIndex = this.finished ? this.currentText.length :
-         Math.max(
-           this.currentText.lastIndexOf('。'),
-           this.currentText.lastIndexOf(';'),
-           this.currentText.lastIndexOf('、'),
-           this.currentText.lastIndexOf('?'),
-           this.currentText.lastIndexOf('\n')
-         )
-       const startIndex = this.speakText.length ? Math.max(0, this.currentText.lastIndexOf(this.speakText) + this.speakText.length) : 0
-
-       if (startIndex >= endIndex) {
-         return resolve(true)
-       }
-       const text = this.currentText.slice(startIndex, endIndex)
-       this.speakText = text
-       const utterThis = new SpeechSynthesisUtterance(text)
-       this.controller.signal.onabort = () => {
-         synth.cancel()
-         this.finished = true
-         resolve(false)
-       }
-
-       utterThis.onend = function (event) {
-         resolve(true)
-       }
-
-       utterThis.onerror = function (event) {
-         resolve(false)
-       }
-
-       const voice = synth.getVoices().find(v => v.name.includes('Microsoft Yunxi Online')) ?? null
-       utterThis.voice = voice
-       synth.speak(utterThis)
-     })
-   }
-
-   private async loop() {
-     if (this.speaking) return
-     this.speaking = true
-     while(!this.finished) {
-       await Promise.all([sleep(1000), this.doSpeek()])
-     }
-     this.speaking = false
-   }
- }
 
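For context on the deleted tts.ts above: the class streams partial text into the browser's window.speechSynthesis by repeatedly speaking only the span between what was already spoken and the last sentence delimiter seen so far (doSpeek). A minimal Python sketch of that chunking rule; the function name and inputs are illustrative, not part of the original file:

# Sketch of the sentence-boundary chunking in doSpeek(): speak only the
# span between the end of the already-spoken text and the last delimiter.
DELIMITERS = "。;、?\n"  # the delimiters the deleted tts.ts searches for

def next_chunk(current_text: str, spoken: str) -> str:
    end = max(current_text.rfind(d) for d in DELIMITERS)
    start = max(0, current_text.rfind(spoken) + len(spoken)) if spoken else 0
    if start >= end:
        return ""  # no complete sentence beyond what was spoken; wait for more text
    return current_text[start:end]

print(next_chunk("你好。世界", ""))  # -> 你好 (everything up to the last 。)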
spaces/2ndelement/voicevox/voicevox_engine/utility/core_version_utility.py DELETED
@@ -1,14 +0,0 @@
- from typing import Iterable
-
- from semver.version import Version
-
-
- def parse_core_version(version: str) -> Version:
-     return Version.parse(version)
-
-
- def get_latest_core_version(versions: Iterable[str]) -> str:
-     if len(versions) == 0:
-         raise Exception("versions must be non-empty.")
-
-     return str(max(map(parse_core_version, versions)))
 
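For reference, a minimal usage sketch of the deleted helper above; it relies on semver ordering rather than plain string ordering, and the version strings below are hypothetical:

from semver.version import Version  # same dependency as the deleted module

versions = ["0.9.0", "0.10.0-preview.1", "0.10.1"]  # hypothetical inputs
print(str(max(map(Version.parse, versions))))  # -> 0.10.1
# String ordering would wrongly rank "0.9.0" highest; semver ranks 0.10.x
# above 0.9.x and a pre-release below its corresponding release.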
spaces/801artistry/RVC801/utils/backups.py DELETED
@@ -1,141 +0,0 @@
- import os
- import shutil
- import hashlib
- import time
- import base64
-
-
-
-
- LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
- WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
- GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
-
- def import_google_drive_backup():
-     print("Importing Google Drive backup...")
-     weights_exist = False
-     for root, dirs, files in os.walk(GOOGLE_DRIVE_PATH):
-         for filename in files:
-             filepath = os.path.join(root, filename)
-             if os.path.isfile(filepath) and not filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
-                 backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
-                 backup_folderpath = os.path.dirname(backup_filepath)
-                 if not os.path.exists(backup_folderpath):
-                     os.makedirs(backup_folderpath)
-                     print(f'Created backup folder: {backup_folderpath}', flush=True)
-                 shutil.copy2(filepath, backup_filepath) # copy file with metadata
-                 print(f'Imported file from Google Drive backup: {filename}')
-             elif filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')) and filename.endswith('.pth'):
-                 weights_exist = True
-                 weights_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, os.path.join(GOOGLE_DRIVE_PATH, 'weights')))
-                 weights_folderpath = os.path.dirname(weights_filepath)
-                 if not os.path.exists(weights_folderpath):
-                     os.makedirs(weights_folderpath)
-                     print(f'Created weights folder: {weights_folderpath}', flush=True)
-                 shutil.copy2(filepath, weights_filepath) # copy file with metadata
-                 print(f'Imported file from weights: {filename}')
-     if weights_exist:
-         print("Copied weights from Google Drive backup to local weights folder.")
-     else:
-         print("No weights found in Google Drive backup.")
-     print("Google Drive backup import completed.")
-
- def get_md5_hash(file_path):
-     hash_md5 = hashlib.md5()
-     with open(file_path, "rb") as f:
-         for chunk in iter(lambda: f.read(4096), b""):
-             hash_md5.update(chunk)
-     return hash_md5.hexdigest()
-
- def copy_weights_folder_to_drive():
-     destination_folder = os.path.join(GOOGLE_DRIVE_PATH, 'weights')
-     try:
-         if not os.path.exists(destination_folder):
-             os.makedirs(destination_folder)
-
-         num_copied = 0
-         for filename in os.listdir(WEIGHTS_FOLDER):
-             if filename.endswith('.pth'):
-                 source_file = os.path.join(WEIGHTS_FOLDER, filename)
-                 destination_file = os.path.join(destination_folder, filename)
-                 if not os.path.exists(destination_file):
-                     shutil.copy2(source_file, destination_file)
-                     num_copied += 1
-                     print(f"Copied {filename} to Google Drive!")
-
-         if num_copied == 0:
-             print("No new finished models found for copying.")
-         else:
-             print(f"Finished copying {num_copied} files to Google Drive!")
-
-     except Exception as e:
-         print(f"An error occurred while copying weights: {str(e)}")
-         # You can log the error or take appropriate actions here.
-
- def backup_files():
-     print("\nStarting backup loop...")
-     last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
-     fully_updated = False # boolean to track if all files are up to date
-
-     while True:
-         try:
-             updated = False # flag to check if any files were updated
-             last_backup_timestamps = {}
-
-             try:
-                 with open(last_backup_timestamps_path, 'r') as f:
-                     last_backup_timestamps = dict(line.strip().split(':') for line in f)
-             except FileNotFoundError:
-                 pass # File does not exist yet, which is fine
-
-             for root, dirs, files in os.walk(LOGS_FOLDER):
-                 for filename in files:
-                     if filename != 'last_backup_timestamps.txt':
-                         filepath = os.path.join(root, filename)
-                         if os.path.isfile(filepath):
-                             backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                             backup_folderpath = os.path.dirname(backup_filepath)
-                             if not os.path.exists(backup_folderpath):
-                                 os.makedirs(backup_folderpath)
-                                 print(f'Created backup folder: {backup_folderpath}', flush=True)
-                             # check if file has changed since last backup
-                             last_backup_timestamp = last_backup_timestamps.get(filepath)
-                             current_timestamp = os.path.getmtime(filepath)
-                             if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
-                                 shutil.copy2(filepath, backup_filepath) # copy file with metadata
-                                 last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp
-                                 if last_backup_timestamp is None:
-                                     print(f'Backed up file: {filename}')
-                                 else:
-                                     print(f'Updating backed up file: {filename}')
-                                 updated = True
-                                 fully_updated = False # if a file is updated, all files are not up to date
-
-             # check if any files were deleted in Colab and delete them from the backup drive
-             for filepath in list(last_backup_timestamps.keys()):
-                 if not os.path.exists(filepath):
-                     backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                     if os.path.exists(backup_filepath):
-                         os.remove(backup_filepath)
-                         print(f'Deleted file: {filepath}')
-                     del last_backup_timestamps[filepath]
-                     updated = True
-                     fully_updated = False # if a file is deleted, all files are not up to date
-
-             if not updated and not fully_updated:
-                 print("Files are up to date.")
-                 fully_updated = True # if all files are up to date, set the boolean to True
-                 copy_weights_folder_to_drive()
-                 sleep_time = 15
-             else:
-                 sleep_time = 0.1
-
-             with open(last_backup_timestamps_path, 'w') as f:
-                 for filepath, timestamp in last_backup_timestamps.items():
-                     f.write(f'{filepath}:{timestamp}\n')
-
-             time.sleep(sleep_time) # wait for 15 seconds before checking again, or 0.1s if not fully up to date to speed up backups
-
-         except Exception as e:
-             print(f"An error occurred: {str(e)}")
-             # You can log the error or take appropriate actions here.
 
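The deleted backup loop above detects changes by comparing each file's current mtime against the value persisted in last_backup_timestamps.txt, one filepath:timestamp pair per line. A minimal sketch of that round-trip under the same format; the function names are illustrative:

import os

def load_timestamps(path):
    # Parse the filepath:timestamp lines exactly as backup_files() writes them.
    try:
        with open(path) as f:
            return dict(line.strip().split(":") for line in f)
    except FileNotFoundError:
        return {}  # first run: nothing has been backed up yet

def needs_backup(filepath, timestamps):
    last = timestamps.get(filepath)
    return last is None or float(last) < os.path.getmtime(filepath)

Like the original, the split(":") assumes paths contain no colon, which holds for the Colab paths the script targets.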
spaces/AI4PD/hexviz/hexviz/ec_number.py DELETED
@@ -1,9 +0,0 @@
- class ECNumber:
-     def __init__(self, number, coordinate, color, radius):
-         self.number = number
-         self.coordinate = coordinate
-         self.color = color
-         self.radius = radius
-
-     def __str__(self):
-         return f"(EC: {self.number}, Coordinate: {self.coordinate}, Color: {self.color})"
 
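A quick usage sketch of the deleted class above; the values are hypothetical, and note that __str__ omits the radius:

ec = ECNumber("3.2.1.4", (12.0, 4.5, 7.1), "red", 1.0)  # hypothetical values
print(ec)  # -> (EC: 3.2.1.4, Coordinate: (12.0, 4.5, 7.1), Color: red)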
spaces/Abhilashvj/planogram-compliance/utils/loss.py DELETED
@@ -1,291 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Loss functions
4
- """
5
-
6
- import torch
7
- import torch.nn as nn
8
-
9
- from utils.metrics import bbox_iou
10
- from utils.torch_utils import de_parallel
11
-
12
-
13
- def smooth_BCE(
14
- eps=0.1,
15
- ): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
16
- # return positive, negative label smoothing BCE targets
17
- return 1.0 - 0.5 * eps, 0.5 * eps
18
-
19
-
20
- class BCEBlurWithLogitsLoss(nn.Module):
21
- # BCEwithLogitLoss() with reduced missing label effects.
22
- def __init__(self, alpha=0.05):
23
- super().__init__()
24
- self.loss_fcn = nn.BCEWithLogitsLoss(
25
- reduction="none"
26
- ) # must be nn.BCEWithLogitsLoss()
27
- self.alpha = alpha
28
-
29
- def forward(self, pred, true):
30
- loss = self.loss_fcn(pred, true)
31
- pred = torch.sigmoid(pred) # prob from logits
32
- dx = pred - true # reduce only missing label effects
33
- # dx = (pred - true).abs() # reduce missing label and false label effects
34
- alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
35
- loss *= alpha_factor
36
- return loss.mean()
37
-
38
-
39
- class FocalLoss(nn.Module):
40
- # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
41
- def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
42
- super().__init__()
43
- self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
44
- self.gamma = gamma
45
- self.alpha = alpha
46
- self.reduction = loss_fcn.reduction
47
- self.loss_fcn.reduction = (
48
- "none" # required to apply FL to each element
49
- )
50
-
51
- def forward(self, pred, true):
52
- loss = self.loss_fcn(pred, true)
53
- # p_t = torch.exp(-loss)
54
- # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
55
-
56
- # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
57
- pred_prob = torch.sigmoid(pred) # prob from logits
58
- p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
59
- alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
60
- modulating_factor = (1.0 - p_t) ** self.gamma
61
- loss *= alpha_factor * modulating_factor
62
-
63
- if self.reduction == "mean":
64
- return loss.mean()
65
- elif self.reduction == "sum":
66
- return loss.sum()
67
- else: # 'none'
68
- return loss
69
-
70
-
71
- class QFocalLoss(nn.Module):
72
- # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
73
- def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
74
- super().__init__()
75
- self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
76
- self.gamma = gamma
77
- self.alpha = alpha
78
- self.reduction = loss_fcn.reduction
79
- self.loss_fcn.reduction = (
80
- "none" # required to apply FL to each element
81
- )
82
-
83
- def forward(self, pred, true):
84
- loss = self.loss_fcn(pred, true)
85
-
86
- pred_prob = torch.sigmoid(pred) # prob from logits
87
- alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
88
- modulating_factor = torch.abs(true - pred_prob) ** self.gamma
89
- loss *= alpha_factor * modulating_factor
90
-
91
- if self.reduction == "mean":
92
- return loss.mean()
93
- elif self.reduction == "sum":
94
- return loss.sum()
95
- else: # 'none'
96
- return loss
97
-
98
-
99
- class ComputeLoss:
100
- sort_obj_iou = False
101
-
102
- # Compute losses
103
- def __init__(self, model, autobalance=False):
104
- device = next(model.parameters()).device # get model device
105
- h = model.hyp # hyperparameters
106
-
107
- # Define criteria
108
- BCEcls = nn.BCEWithLogitsLoss(
109
- pos_weight=torch.tensor([h["cls_pw"]], device=device)
110
- )
111
- BCEobj = nn.BCEWithLogitsLoss(
112
- pos_weight=torch.tensor([h["obj_pw"]], device=device)
113
- )
114
-
115
- # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
116
- self.cp, self.cn = smooth_BCE(
117
- eps=h.get("label_smoothing", 0.0)
118
- ) # positive, negative BCE targets
119
-
120
- # Focal loss
121
- g = h["fl_gamma"] # focal loss gamma
122
- if g > 0:
123
- BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
124
-
125
- m = de_parallel(model).model[-1] # Detect() module
126
- self.balance = {3: [4.0, 1.0, 0.4]}.get(
127
- m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]
128
- ) # P3-P7
129
- self.ssi = (
130
- list(m.stride).index(16) if autobalance else 0
131
- ) # stride 16 index
132
- self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = (
133
- BCEcls,
134
- BCEobj,
135
- 1.0,
136
- h,
137
- autobalance,
138
- )
139
- self.na = m.na # number of anchors
140
- self.nc = m.nc # number of classes
141
- self.nl = m.nl # number of layers
142
- self.anchors = m.anchors
143
- self.device = device
144
-
145
- def __call__(self, p, targets): # predictions, targets
146
- lcls = torch.zeros(1, device=self.device) # class loss
147
- lbox = torch.zeros(1, device=self.device) # box loss
148
- lobj = torch.zeros(1, device=self.device) # object loss
149
- tcls, tbox, indices, anchors = self.build_targets(
150
- p, targets
151
- ) # targets
152
-
153
- # Losses
154
- for i, pi in enumerate(p): # layer index, layer predictions
155
- b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
156
- tobj = torch.zeros(
157
-                 pi.shape[:4], dtype=pi.dtype, device=self.device
-             )  # target obj
-
-             n = b.shape[0]  # number of targets
-             if n:
-                 # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1)  # faster, requires torch 1.8.0
-                 pxy, pwh, _, pcls = pi[b, a, gj, gi].split(
-                     (2, 2, 1, self.nc), 1
-                 )  # target-subset of predictions
-
-                 # Regression
-                 pxy = pxy.sigmoid() * 2 - 0.5
-                 pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i]
-                 pbox = torch.cat((pxy, pwh), 1)  # predicted box
-                 iou = bbox_iou(
-                     pbox, tbox[i], CIoU=True
-                 ).squeeze()  # iou(prediction, target)
-                 lbox += (1.0 - iou).mean()  # iou loss
-
-                 # Objectness
-                 iou = iou.detach().clamp(0).type(tobj.dtype)
-                 if self.sort_obj_iou:
-                     j = iou.argsort()
-                     b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j]
-                 if self.gr < 1:
-                     iou = (1.0 - self.gr) + self.gr * iou
-                 tobj[b, a, gj, gi] = iou  # iou ratio
-
-                 # Classification
-                 if self.nc > 1:  # cls loss (only if multiple classes)
-                     t = torch.full_like(
-                         pcls, self.cn, device=self.device
-                     )  # targets
-                     t[range(n), tcls[i]] = self.cp
-                     lcls += self.BCEcls(pcls, t)  # BCE
-
-                 # Append targets to text file
-                 # with open('targets.txt', 'a') as file:
-                 #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
-
-             obji = self.BCEobj(pi[..., 4], tobj)
-             lobj += obji * self.balance[i]  # obj loss
-             if self.autobalance:
-                 self.balance[i] = (
-                     self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()
-                 )
-
-         if self.autobalance:
-             self.balance = [x / self.balance[self.ssi] for x in self.balance]
-         lbox *= self.hyp["box"]
-         lobj *= self.hyp["obj"]
-         lcls *= self.hyp["cls"]
-         bs = tobj.shape[0]  # batch size
-
-         return (lbox + lobj + lcls) * bs, torch.cat(
-             (lbox, lobj, lcls)
-         ).detach()
-
-     def build_targets(self, p, targets):
-         # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
-         na, nt = self.na, targets.shape[0]  # number of anchors, targets
-         tcls, tbox, indices, anch = [], [], [], []
-         gain = torch.ones(
-             7, device=self.device
-         )  # normalized to gridspace gain
-         ai = (
-             torch.arange(na, device=self.device)
-             .float()
-             .view(na, 1)
-             .repeat(1, nt)
-         )  # same as .repeat_interleave(nt)
-         targets = torch.cat(
-             (targets.repeat(na, 1, 1), ai[..., None]), 2
-         )  # append anchor indices
-
-         g = 0.5  # bias
-         off = (
-             torch.tensor(
-                 [
-                     [0, 0],
-                     [1, 0],
-                     [0, 1],
-                     [-1, 0],
-                     [0, -1],  # j,k,l,m
-                     # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
-                 ],
-                 device=self.device,
-             ).float()
-             * g
-         )  # offsets
-
-         for i in range(self.nl):
-             anchors, shape = self.anchors[i], p[i].shape
-             gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]]  # xyxy gain
-
-             # Match targets to anchors
-             t = targets * gain  # shape(3,n,7)
-             if nt:
-                 # Matches
-                 r = t[..., 4:6] / anchors[:, None]  # wh ratio
-                 j = (
-                     torch.max(r, 1 / r).max(2)[0] < self.hyp["anchor_t"]
-                 )  # compare
-                 # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
-                 t = t[j]  # filter
-
-                 # Offsets
-                 gxy = t[:, 2:4]  # grid xy
-                 gxi = gain[[2, 3]] - gxy  # inverse
-                 j, k = ((gxy % 1 < g) & (gxy > 1)).T
-                 l, m = ((gxi % 1 < g) & (gxi > 1)).T
-                 j = torch.stack((torch.ones_like(j), j, k, l, m))
-                 t = t.repeat((5, 1, 1))[j]
-                 offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
-             else:
-                 t = targets[0]
-                 offsets = 0
-
-             # Define
-             bc, gxy, gwh, a = t.chunk(
-                 4, 1
-             )  # (image, class), grid xy, grid wh, anchors
-             a, (b, c) = a.long().view(-1), bc.long().T  # anchors, image, class
-             gij = (gxy - offsets).long()
-             gi, gj = gij.T  # grid indices
-
-             # Append
-             indices.append(
-                 (b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))
-             )  # image, anchor, grid
-             tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
-             anch.append(anchors[a])  # anchors
-             tcls.append(c)  # class
-
-         return tcls, tbox, indices, anch
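For orientation, here is a minimal sketch of how a loss module like this is typically driven from a YOLO-style training step. `model`, `imgs`, and `targets` are illustrative names, not part of this diff; `targets` follows the (image, class, x, y, w, h) layout that `build_targets` expects.

```python
# Hypothetical wiring, assuming a ComputeLoss-style class built around the
# methods above (anchors, hyperparameters and BCE criteria live on self).
compute_loss = ComputeLoss(model)      # assumed constructor taking the model
preds = model(imgs)                    # list of per-level prediction maps
loss, loss_items = compute_loss(preds, targets)
loss.backward()                        # loss is already scaled by batch size
```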
spaces/AchyuthGamer/ImMagician-Image-Generator/style.css DELETED
@@ -1,24 +0,0 @@
- h1 {
-   text-align: center;
- }
-
- #duplicate-button {
-   margin: auto;
-   color: #fff;
-   background: #1565c0;
-   border-radius: 100vh;
- }
-
- #component-0 {
-   max-width: 730px;
-   margin: auto;
- }
-
- #share-btn-container{padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; max-width: 13rem; margin-left: auto;margin-top: 0.35em;}
- div#share-btn-container > div {flex-direction: row;background: black;align-items: center}
- #share-btn-container:hover {background-color: #060606}
- #share-btn {all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.5rem !important; padding-bottom: 0.5rem !important;right:0;font-size: 15px;}
- #share-btn * {all: unset}
- #share-btn-container div:nth-child(-n+2){width: auto !important;min-height: 0px !important;}
- #share-btn-container .wrap {display: none !important}
- #share-btn-container.hidden {display: none!important}
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/dpt_depth.py DELETED
@@ -1,109 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
- from .base_model import BaseModel
- from .blocks import (
-     FeatureFusionBlock,
-     FeatureFusionBlock_custom,
-     Interpolate,
-     _make_encoder,
-     forward_vit,
- )
-
-
- def _make_fusion_block(features, use_bn):
-     return FeatureFusionBlock_custom(
-         features,
-         nn.ReLU(False),
-         deconv=False,
-         bn=use_bn,
-         expand=False,
-         align_corners=True,
-     )
-
-
- class DPT(BaseModel):
-     def __init__(
-         self,
-         head,
-         features=256,
-         backbone="vitb_rn50_384",
-         readout="project",
-         channels_last=False,
-         use_bn=False,
-     ):
-
-         super(DPT, self).__init__()
-
-         self.channels_last = channels_last
-
-         hooks = {
-             "vitb_rn50_384": [0, 1, 8, 11],
-             "vitb16_384": [2, 5, 8, 11],
-             "vitl16_384": [5, 11, 17, 23],
-         }
-
-         # Instantiate backbone and reassemble blocks
-         self.pretrained, self.scratch = _make_encoder(
-             backbone,
-             features,
-             False,  # use_pretrained; set to True to start from ImageNet weights
-             groups=1,
-             expand=False,
-             exportable=False,
-             hooks=hooks[backbone],
-             use_readout=readout,
-         )
-
-         self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
-         self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
-         self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
-         self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
-
-         self.scratch.output_conv = head
-
-     def forward(self, x):
-         if self.channels_last:
-             # contiguous() is not in-place; keep the returned tensor
-             x = x.contiguous(memory_format=torch.channels_last)
-
-         layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
-
-         layer_1_rn = self.scratch.layer1_rn(layer_1)
-         layer_2_rn = self.scratch.layer2_rn(layer_2)
-         layer_3_rn = self.scratch.layer3_rn(layer_3)
-         layer_4_rn = self.scratch.layer4_rn(layer_4)
-
-         path_4 = self.scratch.refinenet4(layer_4_rn)
-         path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
-         path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
-         path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
-
-         out = self.scratch.output_conv(path_1)
-
-         return out
-
-
- class DPTDepthModel(DPT):
-     def __init__(self, path=None, non_negative=True, **kwargs):
-         features = kwargs["features"] if "features" in kwargs else 256
-
-         head = nn.Sequential(
-             nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
-             Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
-             nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
-             nn.ReLU(True),
-             nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
-             nn.ReLU(True) if non_negative else nn.Identity(),
-             nn.Identity(),
-         )
-
-         super().__init__(head, **kwargs)
-
-         if path is not None:
-             self.load(path)
-
-     def forward(self, x):
-         return super().forward(x).squeeze(dim=1)
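A minimal sketch of running this depth head, assuming the sibling `base_model.py` and `blocks.py` modules (and their timm dependency) are importable; the `path=None` checkpoint choice and the 384x384 input size are assumptions, not taken from this diff.

```python
import torch

model = DPTDepthModel(path=None, backbone="vitb_rn50_384", non_negative=True)
model.eval()
with torch.no_grad():
    x = torch.rand(1, 3, 384, 384)   # DPT is usually fed 384x384 RGB
    depth = model(x)                 # (1, 384, 384) non-negative inverse depth
```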
spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Breast Cancer Detection With Deep Transfer Learning
- emoji: 📈
- colorFrom: blue
- colorTo: pink
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Aditya9790/yolo7-object-tracking/models/__init__.py DELETED
@@ -1 +0,0 @@
- # init
spaces/AgentVerse/agentVerse/ui/src/classes/event_center.ts DELETED
@@ -1,5 +0,0 @@
- import { Events } from "phaser";
-
- const eventsCenter = new Events.EventEmitter();
-
- export default eventsCenter;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/localforage-files.d.ts DELETED
@@ -1,2 +0,0 @@
- import Files from './storage/localforage/files/Files';
- export default Files;
spaces/AkashKhamkar/QnA-generator/before_run.py DELETED
@@ -1,6 +0,0 @@
- import nltk
-
- nltk.download('stopwords')
- nltk.download('wordnet')
- nltk.download('punkt')
- nltk.download('brown')
spaces/Ame42/UBTH/utils.py DELETED
@@ -1,132 +0,0 @@
- # This is a sample Python script.
-
- # Press Shift+F10 to execute it or replace it with your code.
- import os.path
- import pandas as pd
- import glob
- import os
-
- sn = "S/N"
- ipp = "IPPIS"
- gif = "GIFMIS"
- col_1 = "BENEFICIARY NAME"
- gif_col = [col_1, "Employee", "Rank", "Amount"]
- ipp_col = ["Employee Number", "Full Name", "Grade Level", "Step", "Grosss Deductions SUM 1"]
-
-
- def get_raw(link, sheet, file_ext='.xlsx'):
-     match file_ext:
-         # handle testing files
-         case '.csv':
-             return pd.read_csv(link)
-
-         case '.xlsx' | '.xls':
-             return pd.read_excel(link, sheet_name=sheet)
-
-         case _:
-             # raise (not return) the error so callers actually see it
-             raise UnusualFileError(link, "Invalid file extension")
-
-
- def get_data(link, sheet, doc_type=ipp, file_type='.csv'):
-     match file_type:
-         # handle testing files
-         case '.csv':
-             return pd.read_csv(link)
-
-         # handle GIFMIS files
-         case '.xlsx' | '.xls' if doc_type == gif:
-             try:
-                 data = pd.read_excel(link, sheet_name=sheet, skiprows=3, header=0)
-                 return data.drop(data.columns.difference(gif_col), axis=1)
-             except ValueError as err:
-                 raise UnusualFileError(link, str(err))
-             except KeyError:
-                 return None
-
-         # handle IPPIS files
-         case '.xlsx' | '.xls' if doc_type == ipp:
-             try:
-                 data = pd.read_excel(link, sheet_name=sheet, skiprows=4, header=0)
-                 return data.drop(data.columns.difference(ipp_col), axis=1)
-             except ValueError as err:
-                 raise UnusualFileError(link, str(err))
-             except KeyError:
-                 return None
-
-         # default
-         case _:
-             return None
-
-
- def merge_two(first: pd.DataFrame, second: pd.DataFrame, doc_type):
-     hows = ['inner', 'left', 'right']
-     first = first.drop(sn, axis=1, errors="ignore")
-     second = second.drop(sn, axis=1, errors="ignore")
-
-     both, prev, curr = tuple(
-         [first.merge(second, how=how, on=first.columns[0] if doc_type == ipp else first.columns[1]) for how in hows]
-     )
-
-     prev = prev[
-         prev[
-             prev.columns[5] if doc_type == ipp else prev.columns[4]  # Get rows where name column is empty
-         ].isnull()
-     ].dropna(subset=[
-         prev.columns[0] if doc_type == ipp else prev.columns[1]  # Check for empty rows in the employee number column
-     ]).dropna(axis=1, how="all")  # Remove empty columns
-
-     curr = curr[
-         curr[
-             curr.columns[1] if doc_type == ipp else curr.columns[0]  # Get rows where name column is empty
-         ].isnull()
-     ].dropna(subset=[
-         curr.columns[0] if doc_type == ipp else curr.columns[1]  # Check for empty rows in the employee number column
-     ]).dropna(axis=1, how="all")  # Remove empty columns
-
-     return both, prev, curr
-
-
- def merge_all(data_list, keys=("Employee",)):  # was tuple("Employee"), which splits the string into single characters
-     return pd.concat(
-         [data.drop(sn, axis=1, errors="ignore") for data in data_list],
-         axis=1,
-         join='inner',
-         keys=keys,
-         ignore_index=True
-     )
-
-
- def retrieve(dt):
-     # The file extension belongs in file_type, not in the sheet argument;
-     # sheet 0 (the first worksheet) is assumed here.
-     return get_data(dt.name, 0, file_type=os.path.splitext(dt.name)[1])
-
-
- def clear_csv_trash():
-     pattern = '*.csv'  # Desired file pattern
-
-     # Get a list of file paths matching the pattern
-     matching_files = glob.glob(pattern)
-
-     # Loop through the matching files and delete them
-     for file_path in matching_files:
-         try:
-             os.remove(file_path)
-         except OSError as e:
-             print(f"Error deleting {file_path}: {e}")
-
-
- class UnusualFileError(Exception):
-     def __init__(self, file, message):
-         self.source = file
-         self.cause = message
-
-     def __str__(self):
-         return f"{self.cause}: {self.source!r}"
-
-     def get_file(self):
-         return self.source
-
-     def get_message(self):
-         return self.cause
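A runnable sketch of how `merge_two` composes with frames shaped like the `ipp_col` layout above; the names and figures are purely illustrative.

```python
import pandas as pd

cols = ["Employee Number", "Full Name", "Grade Level", "Step",
        "Grosss Deductions SUM 1"]  # (sic) matches ipp_col above
prev = pd.DataFrame([[101, "Ada", 8, 2, 5000.0],
                     [102, "Bayo", 7, 1, 4200.0]], columns=cols)
curr = pd.DataFrame([[102, "Bayo", 7, 2, 4300.0],
                     [103, "Chi", 6, 1, 3900.0]], columns=cols)

both, left_only, new_only = merge_two(prev, curr, doc_type=ipp)
print(both["Employee Number"].tolist())       # [102]  present in both months
print(left_only["Employee Number"].tolist())  # [101]  dropped this month
print(new_only["Employee Number"].tolist())   # [103]  newly added
```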
spaces/Andy1621/uniformer_image_detection/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py DELETED
@@ -1,63 +0,0 @@
- _base_ = [
-     '../_base_/models/faster_rcnn_r50_fpn.py',
-     '../_base_/datasets/coco_detection.py',
-     '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
- ]
- model = dict(
-     pretrained='open-mmlab://regnetx_3.2gf',
-     backbone=dict(
-         _delete_=True,
-         type='RegNet',
-         arch='regnetx_3.2gf',
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         norm_eval=True,
-         style='pytorch'),
-     neck=dict(
-         type='FPN',
-         in_channels=[96, 192, 432, 1008],
-         out_channels=256,
-         num_outs=5))
- img_norm_cfg = dict(
-     # The mean and std are used in PyCls when training RegNets
-     mean=[103.53, 116.28, 123.675],
-     std=[57.375, 57.12, 58.395],
-     to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(
-         type='Resize',
-         img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
-                    (1333, 768), (1333, 800)],
-         multiscale_mode='value',
-         keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
- optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
- lr_config = dict(step=[28, 34])
- runner = dict(type='EpochBasedRunner', max_epochs=36)
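Configs like this are consumed by mmdetection's standard tooling; a quick way to sanity-check one is to load it with mmcv. This assumes an mmdetection checkout so the `_base_` files resolve, and the relative path below is an assumption.

```python
from mmcv import Config

cfg = Config.fromfile(
    'configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py')
print(cfg.model.backbone.type)   # RegNet
print(cfg.runner.max_epochs)     # 36
```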
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/hrnet.py DELETED
@@ -1,537 +0,0 @@
- import torch.nn as nn
- from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
-                       kaiming_init)
- from mmcv.runner import load_checkpoint
- from torch.nn.modules.batchnorm import _BatchNorm
-
- from mmdet.utils import get_root_logger
- from ..builder import BACKBONES
- from .resnet import BasicBlock, Bottleneck
-
-
- class HRModule(nn.Module):
-     """High-Resolution Module for HRNet.
-
-     In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
-     is in this module.
-     """
-
-     def __init__(self,
-                  num_branches,
-                  blocks,
-                  num_blocks,
-                  in_channels,
-                  num_channels,
-                  multiscale_output=True,
-                  with_cp=False,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN')):
-         super(HRModule, self).__init__()
-         self._check_branches(num_branches, num_blocks, in_channels,
-                              num_channels)
-
-         self.in_channels = in_channels
-         self.num_branches = num_branches
-
-         self.multiscale_output = multiscale_output
-         self.norm_cfg = norm_cfg
-         self.conv_cfg = conv_cfg
-         self.with_cp = with_cp
-         self.branches = self._make_branches(num_branches, blocks, num_blocks,
-                                             num_channels)
-         self.fuse_layers = self._make_fuse_layers()
-         self.relu = nn.ReLU(inplace=False)
-
-     def _check_branches(self, num_branches, num_blocks, in_channels,
-                         num_channels):
-         if num_branches != len(num_blocks):
-             error_msg = f'NUM_BRANCHES({num_branches}) ' \
-                         f'!= NUM_BLOCKS({len(num_blocks)})'
-             raise ValueError(error_msg)
-
-         if num_branches != len(num_channels):
-             error_msg = f'NUM_BRANCHES({num_branches}) ' \
-                         f'!= NUM_CHANNELS({len(num_channels)})'
-             raise ValueError(error_msg)
-
-         if num_branches != len(in_channels):
-             error_msg = f'NUM_BRANCHES({num_branches}) ' \
-                         f'!= NUM_INCHANNELS({len(in_channels)})'
-             raise ValueError(error_msg)
-
-     def _make_one_branch(self,
-                          branch_index,
-                          block,
-                          num_blocks,
-                          num_channels,
-                          stride=1):
-         downsample = None
-         if stride != 1 or \
-                 self.in_channels[branch_index] != \
-                 num_channels[branch_index] * block.expansion:
-             downsample = nn.Sequential(
-                 build_conv_layer(
-                     self.conv_cfg,
-                     self.in_channels[branch_index],
-                     num_channels[branch_index] * block.expansion,
-                     kernel_size=1,
-                     stride=stride,
-                     bias=False),
-                 build_norm_layer(self.norm_cfg, num_channels[branch_index] *
-                                  block.expansion)[1])
-
-         layers = []
-         layers.append(
-             block(
-                 self.in_channels[branch_index],
-                 num_channels[branch_index],
-                 stride,
-                 downsample=downsample,
-                 with_cp=self.with_cp,
-                 norm_cfg=self.norm_cfg,
-                 conv_cfg=self.conv_cfg))
-         self.in_channels[branch_index] = \
-             num_channels[branch_index] * block.expansion
-         for i in range(1, num_blocks[branch_index]):
-             layers.append(
-                 block(
-                     self.in_channels[branch_index],
-                     num_channels[branch_index],
-                     with_cp=self.with_cp,
-                     norm_cfg=self.norm_cfg,
-                     conv_cfg=self.conv_cfg))
-
-         return nn.Sequential(*layers)
-
-     def _make_branches(self, num_branches, block, num_blocks, num_channels):
-         branches = []
-
-         for i in range(num_branches):
-             branches.append(
-                 self._make_one_branch(i, block, num_blocks, num_channels))
-
-         return nn.ModuleList(branches)
-
-     def _make_fuse_layers(self):
-         if self.num_branches == 1:
-             return None
-
-         num_branches = self.num_branches
-         in_channels = self.in_channels
-         fuse_layers = []
-         num_out_branches = num_branches if self.multiscale_output else 1
-         for i in range(num_out_branches):
-             fuse_layer = []
-             for j in range(num_branches):
-                 if j > i:
-                     fuse_layer.append(
-                         nn.Sequential(
-                             build_conv_layer(
-                                 self.conv_cfg,
-                                 in_channels[j],
-                                 in_channels[i],
-                                 kernel_size=1,
-                                 stride=1,
-                                 padding=0,
-                                 bias=False),
-                             build_norm_layer(self.norm_cfg, in_channels[i])[1],
-                             nn.Upsample(
-                                 scale_factor=2**(j - i), mode='nearest')))
-                 elif j == i:
-                     fuse_layer.append(None)
-                 else:
-                     conv_downsamples = []
-                     for k in range(i - j):
-                         if k == i - j - 1:
-                             conv_downsamples.append(
-                                 nn.Sequential(
-                                     build_conv_layer(
-                                         self.conv_cfg,
-                                         in_channels[j],
-                                         in_channels[i],
-                                         kernel_size=3,
-                                         stride=2,
-                                         padding=1,
-                                         bias=False),
-                                     build_norm_layer(self.norm_cfg,
-                                                      in_channels[i])[1]))
-                         else:
-                             conv_downsamples.append(
-                                 nn.Sequential(
-                                     build_conv_layer(
-                                         self.conv_cfg,
-                                         in_channels[j],
-                                         in_channels[j],
-                                         kernel_size=3,
-                                         stride=2,
-                                         padding=1,
-                                         bias=False),
-                                     build_norm_layer(self.norm_cfg,
-                                                      in_channels[j])[1],
-                                     nn.ReLU(inplace=False)))
-                     fuse_layer.append(nn.Sequential(*conv_downsamples))
-             fuse_layers.append(nn.ModuleList(fuse_layer))
-
-         return nn.ModuleList(fuse_layers)
-
-     def forward(self, x):
-         """Forward function."""
-         if self.num_branches == 1:
-             return [self.branches[0](x[0])]
-
-         for i in range(self.num_branches):
-             x[i] = self.branches[i](x[i])
-
-         x_fuse = []
-         for i in range(len(self.fuse_layers)):
-             y = 0
-             for j in range(self.num_branches):
-                 if i == j:
-                     y += x[j]
-                 else:
-                     y += self.fuse_layers[i][j](x[j])
-             x_fuse.append(self.relu(y))
-         return x_fuse
-
-
- @BACKBONES.register_module()
- class HRNet(nn.Module):
-     """HRNet backbone.
-
-     High-Resolution Representations for Labeling Pixels and Regions
-     arXiv: https://arxiv.org/abs/1904.04514
-
-     Args:
-         extra (dict): detailed configuration for each stage of HRNet.
-         in_channels (int): Number of input image channels. Default: 3.
-         conv_cfg (dict): dictionary to construct and config conv layer.
-         norm_cfg (dict): dictionary to construct and config norm layer.
-         norm_eval (bool): Whether to set norm layers to eval mode, namely,
-             freeze running stats (mean and var). Note: Effect on Batch Norm
-             and its variants only.
-         with_cp (bool): Use checkpoint or not. Using checkpoint will save some
-             memory while slowing down the training speed.
-         zero_init_residual (bool): whether to use zero init for last norm layer
-             in resblocks to let them behave as identity.
-
-     Example:
-         >>> from mmdet.models import HRNet
-         >>> import torch
-         >>> extra = dict(
-         >>>     stage1=dict(
-         >>>         num_modules=1,
-         >>>         num_branches=1,
-         >>>         block='BOTTLENECK',
-         >>>         num_blocks=(4, ),
-         >>>         num_channels=(64, )),
-         >>>     stage2=dict(
-         >>>         num_modules=1,
-         >>>         num_branches=2,
-         >>>         block='BASIC',
-         >>>         num_blocks=(4, 4),
-         >>>         num_channels=(32, 64)),
-         >>>     stage3=dict(
-         >>>         num_modules=4,
-         >>>         num_branches=3,
-         >>>         block='BASIC',
-         >>>         num_blocks=(4, 4, 4),
-         >>>         num_channels=(32, 64, 128)),
-         >>>     stage4=dict(
-         >>>         num_modules=3,
-         >>>         num_branches=4,
-         >>>         block='BASIC',
-         >>>         num_blocks=(4, 4, 4, 4),
-         >>>         num_channels=(32, 64, 128, 256)))
-         >>> self = HRNet(extra, in_channels=1)
-         >>> self.eval()
-         >>> inputs = torch.rand(1, 1, 32, 32)
-         >>> level_outputs = self.forward(inputs)
-         >>> for level_out in level_outputs:
-         ...     print(tuple(level_out.shape))
-         (1, 32, 8, 8)
-         (1, 64, 4, 4)
-         (1, 128, 2, 2)
-         (1, 256, 1, 1)
-     """
-
-     blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
-
-     def __init__(self,
-                  extra,
-                  in_channels=3,
-                  conv_cfg=None,
-                  norm_cfg=dict(type='BN'),
-                  norm_eval=True,
-                  with_cp=False,
-                  zero_init_residual=False):
-         super(HRNet, self).__init__()
-         self.extra = extra
-         self.conv_cfg = conv_cfg
-         self.norm_cfg = norm_cfg
-         self.norm_eval = norm_eval
-         self.with_cp = with_cp
-         self.zero_init_residual = zero_init_residual
-
-         # stem net
-         self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
-         self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
-
-         self.conv1 = build_conv_layer(
-             self.conv_cfg,
-             in_channels,
-             64,
-             kernel_size=3,
-             stride=2,
-             padding=1,
-             bias=False)
-
-         self.add_module(self.norm1_name, norm1)
-         self.conv2 = build_conv_layer(
-             self.conv_cfg,
-             64,
-             64,
-             kernel_size=3,
-             stride=2,
-             padding=1,
-             bias=False)
-
-         self.add_module(self.norm2_name, norm2)
-         self.relu = nn.ReLU(inplace=True)
-
-         # stage 1
-         self.stage1_cfg = self.extra['stage1']
-         num_channels = self.stage1_cfg['num_channels'][0]
-         block_type = self.stage1_cfg['block']
-         num_blocks = self.stage1_cfg['num_blocks'][0]
-
-         block = self.blocks_dict[block_type]
-         stage1_out_channels = num_channels * block.expansion
-         self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
-
-         # stage 2
-         self.stage2_cfg = self.extra['stage2']
-         num_channels = self.stage2_cfg['num_channels']
-         block_type = self.stage2_cfg['block']
-
-         block = self.blocks_dict[block_type]
-         num_channels = [channel * block.expansion for channel in num_channels]
-         self.transition1 = self._make_transition_layer([stage1_out_channels],
-                                                        num_channels)
-         self.stage2, pre_stage_channels = self._make_stage(
-             self.stage2_cfg, num_channels)
-
-         # stage 3
-         self.stage3_cfg = self.extra['stage3']
-         num_channels = self.stage3_cfg['num_channels']
-         block_type = self.stage3_cfg['block']
-
-         block = self.blocks_dict[block_type]
-         num_channels = [channel * block.expansion for channel in num_channels]
-         self.transition2 = self._make_transition_layer(pre_stage_channels,
-                                                        num_channels)
-         self.stage3, pre_stage_channels = self._make_stage(
-             self.stage3_cfg, num_channels)
-
-         # stage 4
-         self.stage4_cfg = self.extra['stage4']
-         num_channels = self.stage4_cfg['num_channels']
-         block_type = self.stage4_cfg['block']
-
-         block = self.blocks_dict[block_type]
-         num_channels = [channel * block.expansion for channel in num_channels]
-         self.transition3 = self._make_transition_layer(pre_stage_channels,
-                                                        num_channels)
-         self.stage4, pre_stage_channels = self._make_stage(
-             self.stage4_cfg, num_channels)
-
-     @property
-     def norm1(self):
-         """nn.Module: the normalization layer named "norm1" """
-         return getattr(self, self.norm1_name)
-
-     @property
-     def norm2(self):
-         """nn.Module: the normalization layer named "norm2" """
-         return getattr(self, self.norm2_name)
-
-     def _make_transition_layer(self, num_channels_pre_layer,
-                                num_channels_cur_layer):
-         num_branches_cur = len(num_channels_cur_layer)
-         num_branches_pre = len(num_channels_pre_layer)
-
-         transition_layers = []
-         for i in range(num_branches_cur):
-             if i < num_branches_pre:
-                 if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
-                     transition_layers.append(
-                         nn.Sequential(
-                             build_conv_layer(
-                                 self.conv_cfg,
-                                 num_channels_pre_layer[i],
-                                 num_channels_cur_layer[i],
-                                 kernel_size=3,
-                                 stride=1,
-                                 padding=1,
-                                 bias=False),
-                             build_norm_layer(self.norm_cfg,
-                                              num_channels_cur_layer[i])[1],
-                             nn.ReLU(inplace=True)))
-                 else:
-                     transition_layers.append(None)
-             else:
-                 conv_downsamples = []
-                 for j in range(i + 1 - num_branches_pre):
-                     in_channels = num_channels_pre_layer[-1]
-                     out_channels = num_channels_cur_layer[i] \
-                         if j == i - num_branches_pre else in_channels
-                     conv_downsamples.append(
-                         nn.Sequential(
-                             build_conv_layer(
-                                 self.conv_cfg,
-                                 in_channels,
-                                 out_channels,
-                                 kernel_size=3,
-                                 stride=2,
-                                 padding=1,
-                                 bias=False),
-                             build_norm_layer(self.norm_cfg, out_channels)[1],
-                             nn.ReLU(inplace=True)))
-                 transition_layers.append(nn.Sequential(*conv_downsamples))
-
-         return nn.ModuleList(transition_layers)
-
-     def _make_layer(self, block, inplanes, planes, blocks, stride=1):
-         downsample = None
-         if stride != 1 or inplanes != planes * block.expansion:
-             downsample = nn.Sequential(
-                 build_conv_layer(
-                     self.conv_cfg,
-                     inplanes,
-                     planes * block.expansion,
-                     kernel_size=1,
-                     stride=stride,
-                     bias=False),
-                 build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
-
-         layers = []
-         layers.append(
-             block(
-                 inplanes,
-                 planes,
-                 stride,
-                 downsample=downsample,
-                 with_cp=self.with_cp,
-                 norm_cfg=self.norm_cfg,
-                 conv_cfg=self.conv_cfg))
-         inplanes = planes * block.expansion
-         for i in range(1, blocks):
-             layers.append(
-                 block(
-                     inplanes,
-                     planes,
-                     with_cp=self.with_cp,
-                     norm_cfg=self.norm_cfg,
-                     conv_cfg=self.conv_cfg))
-
-         return nn.Sequential(*layers)
-
-     def _make_stage(self, layer_config, in_channels, multiscale_output=True):
-         num_modules = layer_config['num_modules']
-         num_branches = layer_config['num_branches']
-         num_blocks = layer_config['num_blocks']
-         num_channels = layer_config['num_channels']
-         block = self.blocks_dict[layer_config['block']]
-
-         hr_modules = []
-         for i in range(num_modules):
-             # multi_scale_output is only used for the last module
-             if not multiscale_output and i == num_modules - 1:
-                 reset_multiscale_output = False
-             else:
-                 reset_multiscale_output = True
-
-             hr_modules.append(
-                 HRModule(
-                     num_branches,
-                     block,
-                     num_blocks,
-                     in_channels,
-                     num_channels,
-                     reset_multiscale_output,
-                     with_cp=self.with_cp,
-                     norm_cfg=self.norm_cfg,
-                     conv_cfg=self.conv_cfg))
-
-         return nn.Sequential(*hr_modules), in_channels
-
-     def init_weights(self, pretrained=None):
-         """Initialize the weights in backbone.
-
-         Args:
-             pretrained (str, optional): Path to pre-trained weights.
-                 Defaults to None.
-         """
-         if isinstance(pretrained, str):
-             logger = get_root_logger()
-             load_checkpoint(self, pretrained, strict=False, logger=logger)
-         elif pretrained is None:
-             for m in self.modules():
-                 if isinstance(m, nn.Conv2d):
-                     kaiming_init(m)
-                 elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
-                     constant_init(m, 1)
-
-             if self.zero_init_residual:
-                 for m in self.modules():
-                     if isinstance(m, Bottleneck):
-                         constant_init(m.norm3, 0)
-                     elif isinstance(m, BasicBlock):
-                         constant_init(m.norm2, 0)
-         else:
-             raise TypeError('pretrained must be a str or None')
-
-     def forward(self, x):
-         """Forward function."""
-         x = self.conv1(x)
-         x = self.norm1(x)
-         x = self.relu(x)
-         x = self.conv2(x)
-         x = self.norm2(x)
-         x = self.relu(x)
-         x = self.layer1(x)
-
-         x_list = []
-         for i in range(self.stage2_cfg['num_branches']):
-             if self.transition1[i] is not None:
-                 x_list.append(self.transition1[i](x))
-             else:
-                 x_list.append(x)
-         y_list = self.stage2(x_list)
-
-         x_list = []
-         for i in range(self.stage3_cfg['num_branches']):
-             if self.transition2[i] is not None:
-                 x_list.append(self.transition2[i](y_list[-1]))
-             else:
-                 x_list.append(y_list[i])
-         y_list = self.stage3(x_list)
-
-         x_list = []
-         for i in range(self.stage4_cfg['num_branches']):
-             if self.transition3[i] is not None:
-                 x_list.append(self.transition3[i](y_list[-1]))
-             else:
-                 x_list.append(y_list[i])
-         y_list = self.stage4(x_list)
-
-         return y_list
-
-     def train(self, mode=True):
-         """Convert the model into training mode while keeping the
-         normalization layers frozen."""
-         super(HRNet, self).train(mode)
-         if mode and self.norm_eval:
-             for m in self.modules():
-                 # trick: eval has an effect on BatchNorm only
-                 if isinstance(m, _BatchNorm):
-                     m.eval()
spaces/Anonymous-sub/Rerender/ControlNet/ldm/modules/image_degradation/bsrgan_light.py DELETED
@@ -1,651 +0,0 @@
- # -*- coding: utf-8 -*-
- import numpy as np
- import cv2
- import torch
-
- from functools import partial
- import random
- from scipy import ndimage
- import scipy
- import scipy.stats as ss
- from scipy.interpolate import interp2d
- from scipy.linalg import orth
- import albumentations
-
- import ldm.modules.image_degradation.utils_image as util
-
- """
- # --------------------------------------------
- # Super-Resolution
- # --------------------------------------------
- #
- # Kai Zhang ([email protected])
- # https://github.com/cszn
- # From 2019/03--2021/08
- # --------------------------------------------
- """
-
-
- def modcrop_np(img, sf):
-     '''
-     Args:
-         img: numpy image, WxH or WxHxC
-         sf: scale factor
-     Return:
-         cropped image
-     '''
-     w, h = img.shape[:2]
-     im = np.copy(img)
-     return im[:w - w % sf, :h - h % sf, ...]
-
-
- """
- # --------------------------------------------
- # anisotropic Gaussian kernels
- # --------------------------------------------
- """
-
-
- def analytic_kernel(k):
-     """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
-     k_size = k.shape[0]
-     # Calculate the big kernels size
-     big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
-     # Loop over the small kernel to fill the big one
-     for r in range(k_size):
-         for c in range(k_size):
-             big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
-     # Crop the edges of the big kernel to ignore very small values and increase run time of SR
-     crop = k_size // 2
-     cropped_big_k = big_k[crop:-crop, crop:-crop]
-     # Normalize to 1
-     return cropped_big_k / cropped_big_k.sum()
-
-
- def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
-     """ generate an anisotropic Gaussian kernel
-     Args:
-         ksize : e.g., 15, kernel size
-         theta : [0, pi], rotation angle range
-         l1    : [0.1, 50], scaling of eigenvalues
-         l2    : [0.1, l1], scaling of eigenvalues
-         If l1 = l2, will get an isotropic Gaussian kernel.
-     Returns:
-         k     : kernel
-     """
-
-     v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
-     V = np.array([[v[0], v[1]], [v[1], -v[0]]])
-     D = np.array([[l1, 0], [0, l2]])
-     Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
-     k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
-
-     return k
-
-
- def gm_blur_kernel(mean, cov, size=15):
-     center = size / 2.0 + 0.5
-     k = np.zeros([size, size])
-     for y in range(size):
-         for x in range(size):
-             cy = y - center + 1
-             cx = x - center + 1
-             k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
-
-     k = k / np.sum(k)
-     return k
-
-
- def shift_pixel(x, sf, upper_left=True):
-     """shift pixel for super-resolution with different scale factors
-     Args:
-         x: WxHxC or WxH
-         sf: scale factor
-         upper_left: shift direction
-     """
-     h, w = x.shape[:2]
-     shift = (sf - 1) * 0.5
-     xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
-     if upper_left:
-         x1 = xv + shift
-         y1 = yv + shift
-     else:
-         x1 = xv - shift
-         y1 = yv - shift
-
-     x1 = np.clip(x1, 0, w - 1)
-     y1 = np.clip(y1, 0, h - 1)
-
-     if x.ndim == 2:
-         x = interp2d(xv, yv, x)(x1, y1)
-     if x.ndim == 3:
-         for i in range(x.shape[-1]):
-             x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
-
-     return x
-
-
- def blur(x, k):
-     '''
-     x: image, NxcxHxW
-     k: kernel, Nx1xhxw
-     '''
-     n, c = x.shape[:2]
-     p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
-     x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
-     k = k.repeat(1, c, 1, 1)
-     k = k.view(-1, 1, k.shape[2], k.shape[3])
-     x = x.view(1, -1, x.shape[2], x.shape[3])
-     x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
-     x = x.view(n, c, x.shape[2], x.shape[3])
-
-     return x
-
-
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
-     """
-     # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
-     # Kai Zhang
-     # min_var = 0.175 * sf  # variance of the gaussian kernel will be sampled between min_var and max_var
-     # max_var = 2.5 * sf
-     """
-     # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
-     lambda_1 = min_var + np.random.rand() * (max_var - min_var)
-     lambda_2 = min_var + np.random.rand() * (max_var - min_var)
-     theta = np.random.rand() * np.pi  # random theta
-     noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
-
-     # Set COV matrix using Lambdas and Theta
-     LAMBDA = np.diag([lambda_1, lambda_2])
-     Q = np.array([[np.cos(theta), -np.sin(theta)],
-                   [np.sin(theta), np.cos(theta)]])
-     SIGMA = Q @ LAMBDA @ Q.T
-     INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
-
-     # Set expectation position (shifting kernel for aligned image)
-     MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
-     MU = MU[None, None, :, None]
-
-     # Create meshgrid for Gaussian
-     [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
-     Z = np.stack([X, Y], 2)[:, :, :, None]
-
-     # Calculate Gaussian for every pixel of the kernel
-     ZZ = Z - MU
-     ZZ_t = ZZ.transpose(0, 1, 3, 2)
-     raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
-
-     # shift the kernel so it will be centered
-     # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
-
-     # Normalize the kernel and return
-     # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
-     kernel = raw_kernel / np.sum(raw_kernel)
-     return kernel
-
-
- def fspecial_gaussian(hsize, sigma):
-     hsize = [hsize, hsize]
-     siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
-     std = sigma
-     [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
-     arg = -(x * x + y * y) / (2 * std * std)
-     h = np.exp(arg)
-     h[h < np.finfo(float).eps * h.max()] = 0  # np.finfo: scipy.finfo was removed from recent SciPy releases
-     sumh = h.sum()
-     if sumh != 0:
-         h = h / sumh
-     return h
-
-
- def fspecial_laplacian(alpha):
-     alpha = max([0, min([alpha, 1])])
-     h1 = alpha / (alpha + 1)
-     h2 = (1 - alpha) / (alpha + 1)
-     h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
-     h = np.array(h)
-     return h
-
-
- def fspecial(filter_type, *args, **kwargs):
-     '''
-     python code from:
-     https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
-     '''
-     if filter_type == 'gaussian':
-         return fspecial_gaussian(*args, **kwargs)
-     if filter_type == 'laplacian':
-         return fspecial_laplacian(*args, **kwargs)
-
-
- """
- # --------------------------------------------
- # degradation models
- # --------------------------------------------
- """
-
-
- def bicubic_degradation(x, sf=3):
-     '''
-     Args:
-         x: HxWxC image, [0, 1]
-         sf: down-scale factor
-     Return:
-         bicubicly downsampled LR image
-     '''
-     x = util.imresize_np(x, scale=1 / sf)
-     return x
-
-
- def srmd_degradation(x, k, sf=3):
-     ''' blur + bicubic downsampling
-     Args:
-         x: HxWxC image, [0, 1]
-         k: hxw, double
-         sf: down-scale factor
-     Return:
-         downsampled LR image
-     Reference:
-         @inproceedings{zhang2018learning,
-           title={Learning a single convolutional super-resolution network for multiple degradations},
-           author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
-           booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-           pages={3262--3271},
-           year={2018}
-         }
-     '''
-     x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
-     x = bicubic_degradation(x, sf=sf)
-     return x
-
-
- def dpsr_degradation(x, k, sf=3):
-     ''' bicubic downsampling + blur
-     Args:
-         x: HxWxC image, [0, 1]
-         k: hxw, double
-         sf: down-scale factor
-     Return:
-         downsampled LR image
-     Reference:
-         @inproceedings{zhang2019deep,
-           title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
-           author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
-           booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
-           pages={1671--1681},
-           year={2019}
-         }
-     '''
-     x = bicubic_degradation(x, sf=sf)
-     x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
-     return x
-
-
- def classical_degradation(x, k, sf=3):
-     ''' blur + downsampling
-     Args:
-         x: HxWxC image, [0, 1]/[0, 255]
-         k: hxw, double
-         sf: down-scale factor
-     Return:
-         downsampled LR image
-     '''
-     x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
-     # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
-     st = 0
-     return x[st::sf, st::sf, ...]
-
-
- def add_sharpening(img, weight=0.5, radius=50, threshold=10):
-     """USM sharpening. borrowed from real-ESRGAN
-     Input image: I; Blurry image: B.
-     1. K = I + weight * (I - B)
-     2. Mask = 1 if abs(I - B) > threshold, else: 0
-     3. Blur mask:
-     4. Out = Mask * K + (1 - Mask) * I
-     Args:
-         img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
-         weight (float): Sharp weight. Default: 1.
-         radius (float): Kernel size of Gaussian blur. Default: 50.
-         threshold (int):
-     """
-     if radius % 2 == 0:
-         radius += 1
-     blur = cv2.GaussianBlur(img, (radius, radius), 0)
-     residual = img - blur
-     mask = np.abs(residual) * 255 > threshold
-     mask = mask.astype('float32')
-     soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
-
-     K = img + weight * residual
-     K = np.clip(K, 0, 1)
-     return soft_mask * K + (1 - soft_mask) * img
-
-
- def add_blur(img, sf=4):
-     wd2 = 4.0 + sf
-     wd = 2.0 + 0.2 * sf
-
-     wd2 = wd2 / 4
-     wd = wd / 4
-
-     if random.random() < 0.5:
-         l1 = wd2 * random.random()
-         l2 = wd2 * random.random()
-         k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
-     else:
-         k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
-     img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
-
-     return img
-
-
- def add_resize(img, sf=4):
-     rnum = np.random.rand()
-     if rnum > 0.8:  # up
-         sf1 = random.uniform(1, 2)
-     elif rnum < 0.7:  # down
-         sf1 = random.uniform(0.5 / sf, 1)
-     else:
-         sf1 = 1.0
-     img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
-     img = np.clip(img, 0.0, 1.0)
-
-     return img
-
-
- # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
- #     noise_level = random.randint(noise_level1, noise_level2)
- #     rnum = np.random.rand()
- #     if rnum > 0.6:  # add color Gaussian noise
- #         img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
- #     elif rnum < 0.4:  # add grayscale Gaussian noise
- #         img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
- #     else:  # add noise
- #         L = noise_level2 / 255.
- #         D = np.diag(np.random.rand(3))
- #         U = orth(np.random.rand(3, 3))
- #         conv = np.dot(np.dot(np.transpose(U), D), U)
- #         img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
- #     img = np.clip(img, 0.0, 1.0)
- #     return img
-
-
- def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
-     noise_level = random.randint(noise_level1, noise_level2)
-     rnum = np.random.rand()
-     if rnum > 0.6:  # add color Gaussian noise
-         img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-     elif rnum < 0.4:  # add grayscale Gaussian noise
-         img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-     else:  # add noise
-         L = noise_level2 / 255.
-         D = np.diag(np.random.rand(3))
-         U = orth(np.random.rand(3, 3))
-         conv = np.dot(np.dot(np.transpose(U), D), U)
-         img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-     img = np.clip(img, 0.0, 1.0)
-     return img
-
-
- def add_speckle_noise(img, noise_level1=2, noise_level2=25):
-     noise_level = random.randint(noise_level1, noise_level2)
-     img = np.clip(img, 0.0, 1.0)
-     rnum = random.random()
-     if rnum > 0.6:
-         img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
-     elif rnum < 0.4:
-         img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
-     else:
-         L = noise_level2 / 255.
-         D = np.diag(np.random.rand(3))
-         U = orth(np.random.rand(3, 3))
-         conv = np.dot(np.dot(np.transpose(U), D), U)
-         img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
-     img = np.clip(img, 0.0, 1.0)
-     return img
-
-
- def add_Poisson_noise(img):
-     img = np.clip((img * 255.0).round(), 0, 255) / 255.
-     vals = 10 ** (2 * random.random() + 2.0)  # [2, 4]
-     if random.random() < 0.5:
-         img = np.random.poisson(img * vals).astype(np.float32) / vals
-     else:
-         img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
-         img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
-         noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
-         img += noise_gray[:, :, np.newaxis]
-     img = np.clip(img, 0.0, 1.0)
-     return img
-
-
- def add_JPEG_noise(img):
-     quality_factor = random.randint(80, 95)
-     img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
-     result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
-     img = cv2.imdecode(encimg, 1)
-     img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
-     return img
-
-
- def random_crop(lq, hq, sf=4, lq_patchsize=64):
-     h, w = lq.shape[:2]
-     rnd_h = random.randint(0, h - lq_patchsize)
-     rnd_w = random.randint(0, w - lq_patchsize)
-     lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
-
-     rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
-     hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
-     return lq, hq
-
-
- def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
-     """
-     This is the degradation model of BSRGAN from the paper
-     "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-     ----------
-     img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
-     sf: scale factor
-     isp_model: camera ISP model
-     Returns
-     -------
-     img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1]
-     hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
-     """
-     isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-     sf_ori = sf
-
-     h1, w1 = img.shape[:2]
-     img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (each axis by its own remainder)
-     h, w = img.shape[:2]
-
-     if h < lq_patchsize * sf or w < lq_patchsize * sf:
-         raise ValueError(f'img size ({h1}X{w1}) is too small!')
-
-     hq = img.copy()
-
-     if sf == 4 and random.random() < scale2_prob:  # downsample1
-         if np.random.rand() < 0.5:
-             img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
-                              interpolation=random.choice([1, 2, 3]))
-         else:
-             img = util.imresize_np(img, 1 / 2, True)
-         img = np.clip(img, 0.0, 1.0)
-         sf = 2
-
-     shuffle_order = random.sample(range(7), 7)
-     idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-     if idx1 > idx2:  # keep downsample3 last
-         shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
-     for i in shuffle_order:
-
-         if i == 0:
-             img = add_blur(img, sf=sf)
-
-         elif i == 1:
-             img = add_blur(img, sf=sf)
-
-         elif i == 2:
-             a, b = img.shape[1], img.shape[0]
-             # downsample2
-             if random.random() < 0.75:
-                 sf1 = random.uniform(1, 2 * sf)
-                 img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
-                                  interpolation=random.choice([1, 2, 3]))
-             else:
-                 k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
-                 k_shifted = shift_pixel(k, sf)
-                 k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                 img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
-                 img = img[0::sf, 0::sf, ...]  # nearest downsampling
-             img = np.clip(img, 0.0, 1.0)
-
-         elif i == 3:
-             # downsample3
-             img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
-             img = np.clip(img, 0.0, 1.0)
-
-         elif i == 4:
-             # add Gaussian noise
-             img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
-
-         elif i == 5:
-             # add JPEG noise
-             if random.random() < jpeg_prob:
-                 img = add_JPEG_noise(img)
-
-         elif i == 6:
-             # add processed camera sensor noise
-             if random.random() < isp_prob and isp_model is not None:
-                 with torch.no_grad():
-                     img, hq = isp_model.forward(img.copy(), hq)
-
-     # add final JPEG compression noise
-     img = add_JPEG_noise(img)
-
-     # random crop
-     img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
-
-     return img, hq
-
-
- # todo no isp_model?
- def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False):
-     """
-     This is the degradation model of BSRGAN from the paper
-     "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
-     ----------
-     sf: scale factor
-     isp_model: camera ISP model
-     Returns
-     -------
-     example: dict with key 'image' holding the degraded low-quality image (uint8, HxWxC)
-     """
-     image = util.uint2single(image)
-     isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
-     sf_ori = sf
-
-     h1, w1 = image.shape[:2]
-     image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (each axis by its own remainder)
-     h, w = image.shape[:2]
-
-     hq = image.copy()
-
-     if sf == 4 and random.random() < scale2_prob:  # downsample1
-         if np.random.rand() < 0.5:
-             image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
-                                interpolation=random.choice([1, 2, 3]))
-         else:
-             image = util.imresize_np(image, 1 / 2, True)
-         image = np.clip(image, 0.0, 1.0)
-         sf = 2
-
-     shuffle_order = random.sample(range(7), 7)
-     idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
-     if idx1 > idx2:  # keep downsample3 last
-         shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
-
-     for i in shuffle_order:
-
-         if i == 0:
-             image = add_blur(image, sf=sf)
-
-         # elif i == 1:
-         #     image = add_blur(image, sf=sf)
-
-         if i == 0:
-             pass
-
-         elif i == 2:
-             a, b = image.shape[1], image.shape[0]
-             # downsample2
-             if random.random() < 0.8:
-                 sf1 = random.uniform(1, 2 * sf)
-                 image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
-                                    interpolation=random.choice([1, 2, 3]))
-             else:
-                 k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
-                 k_shifted = shift_pixel(k, sf)
-                 k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
-                 image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
-                 image = image[0::sf, 0::sf, ...]  # nearest downsampling
-
-             image = np.clip(image, 0.0, 1.0)
-
-         elif i == 3:
-             # downsample3
-             image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
-             image = np.clip(image, 0.0, 1.0)
-
-         elif i == 4:
-             # add Gaussian noise
-             image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
-
-         elif i == 5:
-             # add JPEG noise
-             if random.random() < jpeg_prob:
-                 image = add_JPEG_noise(image)
-         #
-         # elif i == 6:
-         #     # add processed camera sensor noise
-         #     if random.random() < isp_prob and isp_model is not None:
-         #         with torch.no_grad():
-         #             img, hq = isp_model.forward(img.copy(), hq)
-
-     # add final JPEG compression noise
-     image = add_JPEG_noise(image)
-     image = util.single2uint(image)
-     if up:
-         image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC)  # todo: random, as above? want to condition on it then
-     example = {"image": image}
-     return example
-
-
- if __name__ == '__main__':
-     print("hey")
-     img = util.imread_uint('utils/test.png', 3)
-     img = img[:448, :448]
-     h = img.shape[0] // 4
-     print("resizing to", h)
-     sf = 4
-     deg_fn = partial(degradation_bsrgan_variant, sf=sf)
-     for i in range(20):
-         print(i)
-         img_hq = img
-         img_lq = deg_fn(img)["image"]
-         img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
-         print(img_lq)
-         img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
-         print(img_lq.shape)
-         print("bicubic", img_lq_bicubic.shape)
-         print(img_hq.shape)
-         lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                 interpolation=0)
-         lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
-                                         (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
-                                         interpolation=0)
-         img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
-         util.imsave(img_concat, str(i) + '.png')
spaces/Aqdas/YouTube_Video_OpenAI_whisper/app.py DELETED
@@ -1,17 +0,0 @@
- import streamlit as st
- from whisper import dowload_youtube_video, transcribe_audio  # (sic) name matches the local helper module
- import os
-
-
- st.title("Youtube Video + OpenAI Whisper")
- if st.text_input('Please Enter the access code') == os.environ['password']:
-
-     user_input = st.text_input('Enter Your YouTube URL')
-
-     with st.spinner('Sit back and relax. It takes a minute.'):
-         if st.button('Transcribe'):
-             if user_input:
-                 download_audio = dowload_youtube_video(user_input)
-                 st.write(transcribe_audio())
spaces/BAAI/AltDiffusion/header.html DELETED
@@ -1,43 +0,0 @@
- <div style="text-align: center; max-width: 650px; margin: 0 auto;">
-   <div
-     style="
-       display: inline-flex;
-       gap: 0.8rem;
-       font-size: 1.75rem;
-       margin-bottom: 10px;
-       width: 600px;
-       height: 200px;
-       margin: 0 auto;
-       /* border: 1px solid red; */
-       justify-content: center;
-     "
-   >
-     <a href="https://github.com/FlagAI-Open/FlagAI"><img src="https://raw.githubusercontent.com/920232796/test/master/WechatIMG6906.png" alt="FlagAI" width="80%" height="80%" style="margin: 0 auto;"></a>
-   </div>
-   <div
-     style="
-       display: inline-flex;
-       align-items: center;
-       gap: 0.8rem;
-       font-size: 1.75rem;
-       margin-bottom: 10px;
-       justify-content: center;
-     ">
-     <a href="https://github.com/FlagAI-Open/FlagAI"><h1 style="font-weight: 900; margin-bottom: 7px;">
-       FlagStudio
-     </h1></a>
-   </div>
-   <p style="margin-bottom: 10px; font-size: 94%">
-     FlagStudio 项目致力于贡献优秀AI生成艺术作品。此双语文生图模型项目基于 <a href="https://huggingface.co/CompVis/stable-diffusion" style="text-decoration: underline;">stable diffusion</a>,由BAAI旗下的FlagAI团队提供支持,相关代码和模型权重在<a href="https://github.com/FlagAI-Open/FlagAI/tree/master/examples/AltDiffusion" style="text-decoration: underline;">AltDiffusion</a>中进行开源。
-   </p>
-   <p style="margin-bottom: 10px; font-size: 94%">
-     FlagStudio aims to provide high-quality AI-generated artwork. Our current bilingual model is based on the original <a href="https://huggingface.co/CompVis/stable-diffusion" style="text-decoration: underline;">stable diffusion</a> model and is capable of generating images from both Chinese and English text. FlagStudio is developed and supported by the FlagAI team. Relevant code and model weights are released in <a href="https://github.com/FlagAI-Open/FlagAI/tree/master/examples/AltDiffusion" style="text-decoration: underline;">AltDiffusion</a>.
-   </p>
-   <p style="margin-bottom: 10px; font-size: 94%">
-     AltDiffusion has been added to 🧨Diffusers; see the documentation page: <a href="https://huggingface.co/docs/diffusers/main/en/api/pipelines/alt_diffusion">🧨 Pipeline doc</a>
-   </p>
-   <p style="margin-bottom: 10px; font-size: 94%; text-align: left;">
-     我们在colab设置了一个脚本,你可以在colab试用我们的模型!(We have a script on Colab, so you can try our models there. Enjoy!)
-     <a href="https://colab.research.google.com/drive/1htPovT5YNutl2i31mIYrOzlIgGLm06IX#scrollTo=0KXFRkjG1RVk"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
-   </p>
- </div>
spaces/BIASLab/sars-cov-2-classification-fcgr/src/pipeline.py DELETED
@@ -1,85 +0,0 @@
-
- import json
- from pathlib import Path
- from collections import OrderedDict
- from typing import List, Tuple, Optional, Union
-
- FUNCTIONS_PIPELINE = OrderedDict()
-
- def register_in_pipeline(func):
-     """Collect functions for the pipeline"""
-     print(f"Adding {func.__name__}")
-     if func.__name__ not in FUNCTIONS_PIPELINE:
-         FUNCTIONS_PIPELINE[func.__name__] = func
-     else:
-         raise Exception(f"Duplicated function with name {func.__name__}")
-
- class Pipeline:
-     """Define a sequence of functions to be applied to one input"""
-     FUNCTIONS_PIPELINE = FUNCTIONS_PIPELINE
-     def __init__(self, pipeline: Optional[List[Tuple[str, dict]]] = None):
-         self.pipeline = pipeline if pipeline else []
-
-     def __call__(self, x):
-         """Apply pipeline to the input 'x'"""
-         for pipe in self.pipeline:
-             func_name, *args, kwargs = pipe
-             assert isinstance(kwargs, dict), f"Wrong declaration in {func_name!r}. Must be (str, dict) or (str, tuple, dict)"
-             # apply preprocessing
-             if args:
-                 # print("args and kwargs")
-                 x = self.apply(x, func_name, *args, **kwargs)
-             else:
-                 # print("only kwargs")
-                 x = self.apply(x, func_name, **kwargs)
-         return x
-
-     @classmethod
-     def apply(cls, x, func, *args, **kwargs):
-         """Compute func(x, *args, **kwargs)"""
-         if func in cls.FUNCTIONS_PIPELINE:
-             return cls.FUNCTIONS_PIPELINE[func](x, *args, **kwargs)
-         else:
-             raise TypeError(f"{func} not available")
-
-     def __gt__(self, add_pipe: Union[List, Tuple]):
-         """Add a pipe ("func_name", args, kwargs) or ("func_name", kwargs) to the current pipeline"""
-         if self.is_available(add_pipe[0]):
-             self.pipeline.append(add_pipe)
-             return self
-         else:
-             raise NotImplementedError(f"{add_pipe[0]!r} not available in Pipeline")
-
-     def is_available(self, func_name: str):
-         """Return True if the function 'func_name' is available in Pipeline"""
-         return True if func_name in self.FUNCTIONS_PIPELINE else False
-
-     def asJSON(self, path_save: str = None):
-         """Save pipeline configuration as json file"""
-         path_save = Path(path_save) if path_save else Path("pipeline.json")
-         with open(path_save, "w", encoding="utf8") as fp:
-             json.dump(self.pipeline, fp, indent=4, ensure_ascii=False)
-         print(f"Pipeline configuration saved at {path_save!r}")
-
-     def fromJSON(self, path_pipeline: str):
-         """Load pipeline configuration from json file"""
-         path_pipeline = Path(path_pipeline)
-         with open(path_pipeline, "r", encoding="utf8") as fp:
-             pipeline = json.load(fp)
-
-         # Corroborate that all functions are available
-         available_functions = {pipe[0]: self.is_available(pipe[0])
-                                for pipe in pipeline}
-
-         # TODO: change with the right Exception here
-         if not all(available_functions.values()):
-             print("""
-             Some functions are not available.
-             Please use the @register_in_pipeline decorator to include these functions in the Pipeline.
-             """)
-             # keep only the names whose availability check failed
-             functions_not_available = dict(filter(lambda item: not item[1], available_functions.items()))
-             return [func_name for func_name, available in functions_not_available.items()
-                     if available is False]
-
-         self.pipeline = pipeline
-         print(f"Pipeline loaded from {path_pipeline!r}")
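For orientation, here is a minimal usage sketch of the Pipeline class deleted above; the `scale` and `shift` functions and the `src.pipeline` import path are hypothetical examples, not part of the original Space:

from src.pipeline import Pipeline, register_in_pipeline

@register_in_pipeline
def scale(x, factor=1.0):
    """Multiply the input by a constant factor."""
    return x * factor

@register_in_pipeline
def shift(x, offset=0.0):
    """Add a constant offset to the input."""
    return x + offset

# Each pipe is ("func_name", kwargs); __gt__ appends a pipe and returns self.
# Avoid writing `p > a > b` in one expression -- Python treats that as a
# chained comparison -- so build the pipeline one step at a time.
pipe = Pipeline()
pipe = pipe > ("scale", {"factor": 2.0})
pipe = pipe > ("shift", {"offset": 1.0})

print(pipe(10))               # (10 * 2.0) + 1.0 -> 21.0
pipe.asJSON("pipeline.json")  # persist the configuration as JSON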
spaces/Bart92/RVC_HF/guidml.py DELETED
@@ -1,710 +0,0 @@
- """
- Updates after 04/16:
- use the half-precision setting from config
- rebuild the npy instead of requiring it to be filled in
- v2 support
- support for models without f0
- fixes
-
- int16:
- added support for running without an index
- f0 algorithm switched to harvest (apparently the only thing that affects CPU usage), but without this change the quality is worse
- """
- import os, sys, traceback, re
-
- import json
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from configs.config import Config
-
- Config = Config()
-
- import torch_directml
- import PySimpleGUI as sg
- import sounddevice as sd
- import noisereduce as nr
- import numpy as np
- from fairseq import checkpoint_utils
- import librosa, torch, pyworld, faiss, time, threading
- import torch.nn.functional as F
- import torchaudio.transforms as tat
- import scipy.signal as signal
-
-
- # import matplotlib.pyplot as plt
- from lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid,
-     SynthesizerTrnMs256NSFsid_nono,
-     SynthesizerTrnMs768NSFsid,
-     SynthesizerTrnMs768NSFsid_nono,
- )
- from i18n import I18nAuto
-
- i18n = I18nAuto()
- device = torch_directml.device(torch_directml.default_device())
- current_dir = os.getcwd()
-
-
- class RVC:
-     def __init__(
-         self, key, hubert_path, pth_path, index_path, npy_path, index_rate
-     ) -> None:
-         """
-         Initialize
-         """
-         try:
-             self.f0_up_key = key
-             self.time_step = 160 / 16000 * 1000
-             self.f0_min = 50
-             self.f0_max = 1100
-             self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700)
-             self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700)
-             self.sr = 16000
-             self.window = 160
-             if index_rate != 0:
-                 self.index = faiss.read_index(index_path)
-                 # self.big_npy = np.load(npy_path)
-                 self.big_npy = self.index.reconstruct_n(0, self.index.ntotal)
-                 print("index search enabled")
-             self.index_rate = index_rate
-             model_path = hubert_path
-             print("load model(s) from {}".format(model_path))
-             models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-                 [model_path],
-                 suffix="",
-             )
-             self.model = models[0]
-             self.model = self.model.to(device)
-             if Config.is_half:
-                 self.model = self.model.half()
-             else:
-                 self.model = self.model.float()
-             self.model.eval()
-             cpt = torch.load(pth_path, map_location="cpu")
-             self.tgt_sr = cpt["config"][-1]
-             cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
-             self.if_f0 = cpt.get("f0", 1)
-             self.version = cpt.get("version", "v1")
-             if self.version == "v1":
-                 if self.if_f0 == 1:
-                     self.net_g = SynthesizerTrnMs256NSFsid(
-                         *cpt["config"], is_half=Config.is_half
-                     )
-                 else:
-                     self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-             elif self.version == "v2":
-                 if self.if_f0 == 1:
-                     self.net_g = SynthesizerTrnMs768NSFsid(
-                         *cpt["config"], is_half=Config.is_half
-                     )
-                 else:
-                     self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
-             del self.net_g.enc_q
-             print(self.net_g.load_state_dict(cpt["weight"], strict=False))
-             self.net_g.eval().to(device)
-             if Config.is_half:
-                 self.net_g = self.net_g.half()
-             else:
-                 self.net_g = self.net_g.float()
-         except:
-             print(traceback.format_exc())
-
-     def get_f0(self, x, f0_up_key, inp_f0=None):
-         x_pad = 1
-         f0_min = 50
-         f0_max = 1100
-         f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-         f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-         f0, t = pyworld.harvest(
-             x.astype(np.double),
-             fs=self.sr,
-             f0_ceil=f0_max,
-             f0_floor=f0_min,
-             frame_period=10,
-         )
-         f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr)
-         f0 = signal.medfilt(f0, 3)
-         f0 *= pow(2, f0_up_key / 12)
-         # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-         tf0 = self.sr // self.window  # f0 points per second
-         if inp_f0 is not None:
-             delta_t = np.round(
-                 (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
-             ).astype("int16")
-             replace_f0 = np.interp(
-                 list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
-             )
-             shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0]
-             f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape]
-         # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-         f0bak = f0.copy()
-         f0_mel = 1127 * np.log(1 + f0 / 700)
-         f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-             f0_mel_max - f0_mel_min
-         ) + 1
-         f0_mel[f0_mel <= 1] = 1
-         f0_mel[f0_mel > 255] = 255
-         f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in recent NumPy
-         return f0_coarse, f0bak  # 1-0
-
-     def infer(self, feats: torch.Tensor) -> np.ndarray:
-         """
-         Inference function
-         """
-         audio = feats.clone().cpu().numpy()
-         assert feats.dim() == 1, feats.dim()
-         feats = feats.view(1, -1)
-         padding_mask = torch.BoolTensor(feats.shape).fill_(False)
-         if Config.is_half:
-             feats = feats.half()
-         else:
-             feats = feats.float()
-         inputs = {
-             "source": feats.to(device),
-             "padding_mask": padding_mask.to(device),
-             "output_layer": 9 if self.version == "v1" else 12,
-         }
-         torch.cuda.synchronize()
-         with torch.no_grad():
-             logits = self.model.extract_features(**inputs)
-             feats = (
-                 self.model.final_proj(logits[0]) if self.version == "v1" else logits[0]
-             )
-
-         # index search optimization
-         try:
-             if (
-                 hasattr(self, "index")
-                 and hasattr(self, "big_npy")
-                 and self.index_rate != 0
-             ):
-                 npy = feats[0].cpu().numpy().astype("float32")
-                 score, ix = self.index.search(npy, k=8)
-                 weight = np.square(1 / score)
-                 weight /= weight.sum(axis=1, keepdims=True)
-                 npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
-                 if Config.is_half:
-                     npy = npy.astype("float16")
-                 feats = (
-                     torch.from_numpy(npy).unsqueeze(0).to(device) * self.index_rate
-                     + (1 - self.index_rate) * feats
-                 )
-             else:
-                 print("index search FAIL or disabled")
-         except:
-             traceback.print_exc()
-             print("index search FAIL")
-         feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-         torch.cuda.synchronize()
-         print(feats.shape)
-         if self.if_f0 == 1:
-             pitch, pitchf = self.get_f0(audio, self.f0_up_key)
-             p_len = min(feats.shape[1], 13000, pitch.shape[0])  # cap length to avoid GPU OOM
-         else:
-             pitch, pitchf = None, None
-             p_len = min(feats.shape[1], 13000)  # cap length to avoid GPU OOM
-         torch.cuda.synchronize()
-         # print(feats.shape,pitch.shape)
-         feats = feats[:, :p_len, :]
-         if self.if_f0 == 1:
-             pitch = pitch[:p_len]
-             pitchf = pitchf[:p_len]
-             pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
-             pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
-         p_len = torch.LongTensor([p_len]).to(device)
-         ii = 0  # sid
-         sid = torch.LongTensor([ii]).to(device)
-         with torch.no_grad():
-             if self.if_f0 == 1:
-                 infered_audio = (
-                     self.net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
-                     .data.cpu()
-                     .float()
-                 )
-             else:
-                 infered_audio = (
-                     self.net_g.infer(feats, p_len, sid)[0][0, 0].data.cpu().float()
-                 )
-         torch.cuda.synchronize()
-         return infered_audio
-
-
- class GUIConfig:
-     def __init__(self) -> None:
-         self.hubert_path: str = ""
-         self.pth_path: str = ""
-         self.index_path: str = ""
-         self.npy_path: str = ""
-         self.pitch: int = 12
-         self.samplerate: int = 44100
-         self.block_time: float = 1.0  # s
-         self.buffer_num: int = 1
-         self.threhold: int = -30
-         self.crossfade_time: float = 0.08
-         self.extra_time: float = 0.04
-         self.I_noise_reduce = False
-         self.O_noise_reduce = False
-         self.index_rate = 0.3
-
-
- class GUI:
-     def __init__(self) -> None:
-         self.config = GUIConfig()
-         self.flag_vc = False
-
-         self.launcher()
-
-     def load(self):
-         (
-             input_devices,
-             output_devices,
-             input_devices_indices,
-             output_devices_indices,
-         ) = self.get_devices()
-         try:
-             with open("values1.json", "r") as j:
-                 data = json.load(j)
-         except:
-             with open("values1.json", "w") as j:
-                 data = {
-                     "pth_path": "",
-                     "index_path": "",
-                     "sg_input_device": input_devices[
-                         input_devices_indices.index(sd.default.device[0])
-                     ],
-                     "sg_output_device": output_devices[
-                         output_devices_indices.index(sd.default.device[1])
-                     ],
-                     "threhold": "-45",
-                     "pitch": "0",
-                     "index_rate": "0",
-                     "block_time": "1",
-                     "crossfade_length": "0.04",
-                     "extra_time": "1",
-                 }
-         return data
-
-     def launcher(self):
-         data = self.load()
-         sg.theme("LightBlue3")
-         input_devices, output_devices, _, _ = self.get_devices()
-         layout = [
-             [
-                 sg.Frame(
-                     title=i18n("Load model"),
-                     layout=[
-                         [
-                             sg.Input(
-                                 default_text="hubert_base.pt",
-                                 key="hubert_path",
-                                 disabled=True,
-                             ),
-                             sg.FileBrowse(
-                                 i18n("Hubert Model"),
-                                 initial_folder=os.path.join(os.getcwd()),
-                                 file_types=(("pt files", "*.pt"),),
-                             ),
-                         ],
-                         [
-                             sg.Input(
-                                 default_text=data.get("pth_path", ""),
-                                 key="pth_path",
-                             ),
-                             sg.FileBrowse(
-                                 i18n("Select the .pth file"),
-                                 initial_folder=os.path.join(os.getcwd(), "weights"),
-                                 file_types=(("weight files", "*.pth"),),
-                             ),
-                         ],
-                         [
-                             sg.Input(
-                                 default_text="你不需要填写这个You don't need write this.",
-                                 key="npy_path",
-                                 disabled=True,
-                             ),
-                             sg.FileBrowse(
-                                 i18n("Select the .npy file"),
-                                 initial_folder=os.path.join(os.getcwd(), "logs"),
-                                 file_types=(("feature files", "*.npy"),),
-                             ),
-                         ],
-                         [
-                             sg.Input(
-                                 default_text=data.get("index_path", ""),
-                                 key="index_path",
-                             ),
-                             sg.FileBrowse(
-                                 i18n("Select the .index file"),
-                                 initial_folder=os.path.join(os.getcwd(), "logs"),
-                                 file_types=(("index files", "*.index"),),
-                             ),
-                         ],
-                     ],
-                 )
-             ],
-             [
-                 sg.Frame(
-                     layout=[
-                         [
-                             sg.Text(i18n("Input device")),
-                             sg.Combo(
-                                 input_devices,
-                                 key="sg_input_device",
-                                 default_value=data.get("sg_input_device", ""),
-                             ),
-                         ],
-                         [
-                             sg.Text(i18n("Output device")),
-                             sg.Combo(
-                                 output_devices,
-                                 key="sg_output_device",
-                                 default_value=data.get("sg_output_device", ""),
-                             ),
-                         ],
-                     ],
-                     title=i18n("Audio device (please use the same type of driver)"),
-                 )
-             ],
-             [
-                 sg.Frame(
-                     layout=[
-                         [
-                             sg.Text(i18n("Response threshold")),
-                             sg.Slider(
-                                 range=(-60, 0),
-                                 key="threhold",
-                                 resolution=1,
-                                 orientation="h",
-                                 default_value=data.get("threhold", ""),
-                             ),
-                         ],
-                         [
-                             sg.Text(i18n("Pitch settings")),
-                             sg.Slider(
-                                 range=(-24, 24),
-                                 key="pitch",
-                                 resolution=1,
-                                 orientation="h",
-                                 default_value=data.get("pitch", ""),
-                             ),
-                         ],
-                         [
-                             sg.Text(i18n("Index Rate")),
-                             sg.Slider(
-                                 range=(0.0, 1.0),
-                                 key="index_rate",
-                                 resolution=0.01,
-                                 orientation="h",
-                                 default_value=data.get("index_rate", ""),
-                             ),
-                         ],
-                     ],
-                     title=i18n("General settings"),
-                 ),
-                 sg.Frame(
-                     layout=[
-                         [
-                             sg.Text(i18n("Sample length")),
-                             sg.Slider(
-                                 range=(0.1, 3.0),
-                                 key="block_time",
-                                 resolution=0.1,
-                                 orientation="h",
-                                 default_value=data.get("block_time", ""),
-                             ),
-                         ],
-                         [
-                             sg.Text(i18n("Fade length")),
-                             sg.Slider(
-                                 range=(0.01, 0.15),
-                                 key="crossfade_length",
-                                 resolution=0.01,
-                                 orientation="h",
-                                 default_value=data.get("crossfade_length", ""),
-                             ),
-                         ],
-                         [
-                             sg.Text(i18n("Extra推理时长")),
-                             sg.Slider(
-                                 range=(0.05, 3.00),
-                                 key="extra_time",
-                                 resolution=0.01,
-                                 orientation="h",
-                                 default_value=data.get("extra_time", ""),
-                             ),
-                         ],
-                         [
-                             sg.Checkbox(i18n("Input noise reduction"), key="I_noise_reduce"),
-                             sg.Checkbox(i18n("Output noise reduction"), key="O_noise_reduce"),
-                         ],
-                     ],
-                     title=i18n("Performance settings"),
-                 ),
-             ],
-             [
-                 sg.Button(i18n("开始音频Convert"), key="start_vc"),
-                 sg.Button(i18n("停止音频Convert"), key="stop_vc"),
-                 sg.Text(i18n("Inference time (ms):")),
-                 sg.Text("0", key="infer_time"),
-             ],
-         ]
-         self.window = sg.Window("RVC - GUI", layout=layout)
-         self.event_handler()
-
-     def event_handler(self):
-         while True:
-             event, values = self.window.read()
-             if event == sg.WINDOW_CLOSED:
-                 self.flag_vc = False
-                 exit()
-             if event == "start_vc" and self.flag_vc == False:
-                 if self.set_values(values) == True:
-                     print("using_cuda:" + str(torch.cuda.is_available()))
-                     self.start_vc()
-                     settings = {
-                         "pth_path": values["pth_path"],
-                         "index_path": values["index_path"],
-                         "sg_input_device": values["sg_input_device"],
-                         "sg_output_device": values["sg_output_device"],
-                         "threhold": values["threhold"],
-                         "pitch": values["pitch"],
-                         "index_rate": values["index_rate"],
-                         "block_time": values["block_time"],
-                         "crossfade_length": values["crossfade_length"],
-                         "extra_time": values["extra_time"],
-                     }
-                     with open("values1.json", "w") as j:
-                         json.dump(settings, j)
-             if event == "stop_vc" and self.flag_vc == True:
-                 self.flag_vc = False
-
-     def set_values(self, values):
-         if len(values["pth_path"].strip()) == 0:
-             sg.popup(i18n("Select the pth file"))
-             return False
-         if len(values["index_path"].strip()) == 0:
-             sg.popup(i18n("Select the index file"))
-             return False
-         pattern = re.compile("[^\x00-\x7F]+")
-         if pattern.findall(values["hubert_path"]):
-             sg.popup(i18n("The hubert model path must not contain Chinese characters"))
-             return False
-         if pattern.findall(values["pth_path"]):
-             sg.popup(i18n("The pth file path must not contain Chinese characters."))
-             return False
-         if pattern.findall(values["index_path"]):
-             sg.popup(i18n("The index file path must not contain Chinese characters."))
-             return False
-         self.set_devices(values["sg_input_device"], values["sg_output_device"])
-         self.config.hubert_path = os.path.join(current_dir, "hubert_base.pt")
-         self.config.pth_path = values["pth_path"]
-         self.config.index_path = values["index_path"]
-         self.config.npy_path = values["npy_path"]
-         self.config.threhold = values["threhold"]
-         self.config.pitch = values["pitch"]
-         self.config.block_time = values["block_time"]
-         self.config.crossfade_time = values["crossfade_length"]
-         self.config.extra_time = values["extra_time"]
-         self.config.I_noise_reduce = values["I_noise_reduce"]
-         self.config.O_noise_reduce = values["O_noise_reduce"]
-         self.config.index_rate = values["index_rate"]
-         return True
-
-     def start_vc(self):
-         torch.cuda.empty_cache()
-         self.flag_vc = True
-         self.block_frame = int(self.config.block_time * self.config.samplerate)
-         self.crossfade_frame = int(self.config.crossfade_time * self.config.samplerate)
-         self.sola_search_frame = int(0.012 * self.config.samplerate)
-         self.delay_frame = int(0.01 * self.config.samplerate)  # reserve 0.02 s of lead time
-         self.extra_frame = int(self.config.extra_time * self.config.samplerate)
-         self.rvc = None
-         self.rvc = RVC(
-             self.config.pitch,
-             self.config.hubert_path,
-             self.config.pth_path,
-             self.config.index_path,
-             self.config.npy_path,
-             self.config.index_rate,
-         )
-         self.input_wav: np.ndarray = np.zeros(
-             self.extra_frame
-             + self.crossfade_frame
-             + self.sola_search_frame
-             + self.block_frame,
-             dtype="float32",
-         )
-         self.output_wav: torch.Tensor = torch.zeros(
-             self.block_frame, device=device, dtype=torch.float32
-         )
-         self.sola_buffer: torch.Tensor = torch.zeros(
-             self.crossfade_frame, device=device, dtype=torch.float32
-         )
-         self.fade_in_window: torch.Tensor = torch.linspace(
-             0.0, 1.0, steps=self.crossfade_frame, device=device, dtype=torch.float32
-         )
-         self.fade_out_window: torch.Tensor = 1 - self.fade_in_window
-         self.resampler1 = tat.Resample(
-             orig_freq=self.config.samplerate, new_freq=16000, dtype=torch.float32
-         )
-         self.resampler2 = tat.Resample(
-             orig_freq=self.rvc.tgt_sr,
-             new_freq=self.config.samplerate,
-             dtype=torch.float32,
-         )
-         thread_vc = threading.Thread(target=self.soundinput)
-         thread_vc.start()
-
-     def soundinput(self):
-         """
-         Accept audio input
-         """
-         with sd.Stream(
-             channels=2,
-             callback=self.audio_callback,
-             blocksize=self.block_frame,
-             samplerate=self.config.samplerate,
-             dtype="float32",
-         ):
-             while self.flag_vc:
-                 time.sleep(self.config.block_time)
-                 print("Audio block passed.")
-         print("ENDing VC")
-
-     def audio_callback(
-         self, indata: np.ndarray, outdata: np.ndarray, frames, times, status
-     ):
-         """
-         Audio processing
-         """
-         start_time = time.perf_counter()
-         indata = librosa.to_mono(indata.T)
-         if self.config.I_noise_reduce:
-             indata[:] = nr.reduce_noise(y=indata, sr=self.config.samplerate)
-
-         """noise gate"""
-         frame_length = 2048
-         hop_length = 1024
-         rms = librosa.feature.rms(
-             y=indata, frame_length=frame_length, hop_length=hop_length
-         )
-         db_threhold = librosa.amplitude_to_db(rms, ref=1.0)[0] < self.config.threhold
-         # print(rms.shape,db.shape,db)
-         for i in range(db_threhold.shape[0]):
-             if db_threhold[i]:
-                 indata[i * hop_length : (i + 1) * hop_length] = 0
-         self.input_wav[:] = np.append(self.input_wav[self.block_frame :], indata)
-
-         # infer
-         print("input_wav:" + str(self.input_wav.shape))
-         # print('infered_wav:'+str(infer_wav.shape))
-         infer_wav: torch.Tensor = self.resampler2(
-             self.rvc.infer(self.resampler1(torch.from_numpy(self.input_wav)))
-         )[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].to(
-             device
-         )
-         print("infer_wav:" + str(infer_wav.shape))
-
-         # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
-         cor_nom = F.conv1d(
-             infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame],
-             self.sola_buffer[None, None, :],
-         )
-         cor_den = torch.sqrt(
-             F.conv1d(
-                 infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
-                 ** 2,
-                 torch.ones(1, 1, self.crossfade_frame, device=device),
-             )
-             + 1e-8
-         )
-         sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
-         print("sola offset: " + str(int(sola_offset)))
-
-         # crossfade
-         self.output_wav[:] = infer_wav[sola_offset : sola_offset + self.block_frame]
-         self.output_wav[: self.crossfade_frame] *= self.fade_in_window
-         self.output_wav[: self.crossfade_frame] += self.sola_buffer[:]
-         if sola_offset < self.sola_search_frame:
-             self.sola_buffer[:] = (
-                 infer_wav[
-                     -self.sola_search_frame
-                     - self.crossfade_frame
-                     + sola_offset : -self.sola_search_frame
-                     + sola_offset
-                 ]
-                 * self.fade_out_window
-             )
-         else:
-             self.sola_buffer[:] = (
-                 infer_wav[-self.crossfade_frame :] * self.fade_out_window
-             )
-
-         if self.config.O_noise_reduce:
-             outdata[:] = np.tile(
-                 nr.reduce_noise(
-                     y=self.output_wav[:].cpu().numpy(), sr=self.config.samplerate
-                 ),
-                 (2, 1),
-             ).T
-         else:
-             outdata[:] = self.output_wav[:].repeat(2, 1).t().cpu().numpy()
-         total_time = time.perf_counter() - start_time
-         self.window["infer_time"].update(int(total_time * 1000))
-         print("infer time:" + str(total_time))
-
-     def get_devices(self, update: bool = True):
-         """Get the list of audio devices"""
-         if update:
-             sd._terminate()
-             sd._initialize()
-         devices = sd.query_devices()
-         hostapis = sd.query_hostapis()
-         for hostapi in hostapis:
-             for device_idx in hostapi["devices"]:
-                 devices[device_idx]["hostapi_name"] = hostapi["name"]
-         input_devices = [
-             f"{d['name']} ({d['hostapi_name']})"
-             for d in devices
-             if d["max_input_channels"] > 0
-         ]
-         output_devices = [
-             f"{d['name']} ({d['hostapi_name']})"
-             for d in devices
-             if d["max_output_channels"] > 0
-         ]
-         input_devices_indices = [
-             d["index"] if "index" in d else d["name"]
-             for d in devices
-             if d["max_input_channels"] > 0
-         ]
-         output_devices_indices = [
-             d["index"] if "index" in d else d["name"]
-             for d in devices
-             if d["max_output_channels"] > 0
-         ]
-         return (
-             input_devices,
-             output_devices,
-             input_devices_indices,
-             output_devices_indices,
-         )
-
-     def set_devices(self, input_device, output_device):
-         """Set the input and output devices"""
-         (
-             input_devices,
-             output_devices,
-             input_device_indices,
-             output_device_indices,
-         ) = self.get_devices()
-         sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
-         sd.default.device[1] = output_device_indices[
-             output_devices.index(output_device)
-         ]
-         print("input device:" + str(sd.default.device[0]) + ":" + str(input_device))
-         print("output device:" + str(sd.default.device[1]) + ":" + str(output_device))
-
-
- gui = GUI()
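For reference, a standalone sketch of the SOLA (synchronized overlap-add) offset search and crossfade used in `audio_callback` above, on synthetic buffers with arbitrary illustrative sizes:

import torch
import torch.nn.functional as F

crossfade, search = 160, 48                 # illustrative frame counts
prev_tail = torch.randn(crossfade)          # tail kept from the previous block
cur = torch.randn(crossfade + search + 640) # newly inferred block

# Normalized cross-correlation over the search window: pick the shift where
# the new block lines up best with the saved tail of the previous block.
head = cur[None, None, : crossfade + search]
num = F.conv1d(head, prev_tail[None, None, :])
den = torch.sqrt(F.conv1d(head ** 2, torch.ones(1, 1, crossfade)) + 1e-8)
offset = int(torch.argmax(num[0, 0] / den[0, 0]))

# Linear crossfade between the previous tail and the aligned new block.
fade_in = torch.linspace(0.0, 1.0, crossfade)
out = cur[offset : offset + crossfade] * fade_in + prev_tail * (1.0 - fade_in)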
spaces/Benson/text-generation/Examples/Botn Fiebre Descargar Pc.md DELETED
@@ -1,58 +0,0 @@
-
- <h1>How to Download and Play Button Fever on PC</h1>
- <p>Do you love puzzle games that test your multitasking skills and creativity? If so, you may want to try Button Fever, a fun and addictive game that lets you place and merge buttons on a board. In this article, we will show you how to download and play Button Fever on your PC, along with some tips and tricks to help you get the most out of it.</p>
- <h2>button fever download pc</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://bltlly.com/2v6Jpl">https://bltlly.com/2v6Jpl</a></b></p><br /><br />
- <h2>What is Button Fever?</h2>
- <h3>A fun and addictive puzzle game</h3>
- <p>Button Fever is a game developed by Rollic Games, a company that specializes in casual and hyper-casual games for mobile devices. Button Fever is one of its most popular titles, with more than 10 million downloads on the Google Play Store. The game is suitable for all ages and can be played offline or online.</p>
- <h3>Features and gameplay</h3>
- <p>The game is simple but challenging. You have a board with empty slots and a queue of buttons at the bottom. Your goal is to place the buttons on the board and clear lines by matching the buttons' colors or shapes. The more lines you clear, the more points you earn. You can also earn coins by completing levels, which you can use to unlock new buttons and themes.</p>
- <p>The game has different difficulty levels, ranging from easy to hard. Each level has a different board size, number of buttons, and time limit. You can also choose between different modes, such as classic, arcade, or zen. The game also has daily challenges, leaderboards, and achievements to keep you engaged.</p>
- <h2>How to Download Button Fever on PC?</h2>
- <h3>Option 1: Download from the official website</h3>
- <p>If you want to download Button Fever directly from the developer's website, you can follow these steps:</p>
- <h4>Step 1: Visit <a href="( 1 )">the website</a> and click the download button.</h4>
- <p>This will take you to a page where you can choose your operating system (Windows or Mac) and download the installation file.</p>
- <p></p>
-
- <p>Once you have downloaded the file, double-click it to start the installation process. You may need to grant the program permission to make changes to your device. Follow the on-screen instructions to complete the installation.</p>
- <h4>Step 3: Launch the game and enjoy.</h4>
- <p>After the installation, you can find a shortcut icon for Button Fever on your desktop or start menu. Click it to launch the game and start playing.</p>
- <h3>Option 2: Download from a third-party platform</h3>
- <p>If you prefer to download Button Fever from a third-party platform that offers a variety of games, you can use one of these options:</p>
- <h4>Step 1: Install a game launcher such as Epic Games or Steam.</h4>
- <p>A game launcher is a program that lets you access, download, install, update, and play games from different developers and publishers. Some of the most popular game launchers are Epic Games and Steam, which you can download for free from their respective websites.</p>
- <h4>Step 2: Create an account and sign in.</h4>
- <p>After you have installed the game launcher, you will need to create an account and sign in with your email and password. You may also need to verify your account and accept the platform's terms and conditions.</p>
- <h4>Step 3: Search for Button Fever and buy it or get it for free.</h4>
- <p>Once you are signed in, you can browse the game library and search for Button Fever. Depending on the platform, you may have to buy the game or get it for free. For example, on Epic Games, Button Fever is available for free, while on Steam it costs $4.99. You can also check the game's reviews, ratings, screenshots, and videos before deciding to get it.</p>
- <h4>Step 4: Install the game and play it from the launcher.</h4>
-
- <h2>Tips and Tricks for Playing Button Fever on PC</h2>
- <p>Playing Button Fever on PC can be more enjoyable and convenient than playing it on a mobile device. Here are some tips and tricks to help you play better and have more fun:</p>
- <h3>Use the mouse or keyboard to interact with the buttons.</h3>
- <p>One of the advantages of playing Button Fever on PC is that you can use the mouse or keyboard to interact with the buttons. You can drag and drop the buttons with the mouse, or use the arrow keys to move them. You can also use the spacebar to rotate them or press the Enter key to place them on the board. This can make your gameplay faster and smoother.</p>
- <h3>Clear lines by matching the buttons' colors or shapes.</h3>
- <p>The main objective of Button Fever is to clear lines by matching the buttons' colors or shapes. You can match three or more buttons of the same color or shape horizontally, vertically, or diagonally. When you clear a line, you earn points and coins and make room for more buttons. You can also create combos by clearing several lines at once, which gives you bonus points and coins.</p>
- <h3>Earn coins and unlock new buttons and themes.</h3>
- <p>By playing Button Fever, you will earn coins that you can use to unlock new buttons and themes. Each button has a different color, shape, and design, such as stars, hearts, flowers, animals, fruits, etc. Each theme has a different background, music, and sound effects, such as forest, beach, space, etc. You can customize your game by choosing your favorite buttons and themes from the shop.</p>
- <h3>Challenge yourself with different levels and modes.</h3>
-
- <h2>Conclusion</h2>
- <p>Button Fever is a fun and addictive puzzle game that lets you place and merge buttons on a board. You can download and play Button Fever on your PC by following one of the options we have shown above. You can also use some of our tips and tricks to improve your gameplay and have more fun. If you like puzzle games that test your multitasking skills and creativity, you should definitely give Button Fever a try.</p>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions about Button Fever:</p>
- <ul>
- <li><b>Is Button Fever free?</b></li>
- <p>Button Fever is free to download and play on mobile devices from the Google Play Store or App Store. However, it may contain ads or in-app purchases that require real money. On PC platforms such as Epic Games or Steam, Button Fever may be free or paid depending on the platform's availability and offers.</p>
- <li><b>Is Button Fever safe?</b></li>
- <p>Button Fever is safe to download and play as long as you get it from a trusted source such as the official website or a reputable platform like Epic Games or Steam. You should avoid downloading Button Fever from unknown or suspicious sources that may contain viruses or malware that could harm your device or steal your personal information.</p>
- <li><b>How can I contact the developer of Button Fever?</b></li>
- <p>If you have any questions, feedback, or issues regarding Button Fever, you can contact the developer by sending an email to <a href="mailto:[email protected]">[email protected]</a>. You can also visit their website <a href=">"</a> or follow them on <a href="">Facebook</a>, <a href="">Twitter</a>, or <a href=">Instagram</a> for more information and updates.</p>
- <li><b>How can I play Button Fever with my friends?</b></li>
-
- <li><b>How can I get more coins in Button Fever?</b></li>
- <p>Coins are Button Fever's currency, which you can use to unlock new buttons and themes. You can earn coins by clearing levels, completing daily challenges, watching ads, or making in-app purchases. You can also get free coins by spinning the wheel or opening the chest every day.</p>
- </ul></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Cazador Asesino 2 Apk Descargar.md DELETED
@@ -1,67 +0,0 @@
- <br />
- <h1>Hunter Assassin 2 APK Download: A Guide for Android Users</h1>
- <p>If you are looking for an action-packed game that will test your stealth and shooting skills, you may want to try Hunter Assassin 2. This is the sequel to the popular game Hunter Assassin, which was one of the most downloaded games of 2020. In this game, you play as an assassin who has to eliminate targets using shadows and your surroundings. You also have to collect valuable items, unlock new weapons and heroes, and face challenging bosses. In this article, we will tell you everything you need to know about Hunter Assassin 2, including its features, how to download it on your Android device, some tips and tricks for playing it, and some alternatives to it.</p>
- <h2>What is Hunter Assassin 2?</h2>
- <p>Hunter Assassin 2 is a 2D tactical action game developed by Ruby Game Studio. It is the long-awaited sequel to Hunter Assassin, which was downloaded by more than 300 million players worldwide. The game has more than 10 million downloads on the Google Play Store and a rating of 4.0 out of 5 stars. The game is rated for ages 7 and up and contains ads and in-app purchases.</p>
- <h2>hunter assassin 2 apk download</h2><br /><p><b><b>DOWNLOAD</b> &#10084; <a href="https://bltlly.com/2v6KMW">https://bltlly.com/2v6KMW</a></b></p><br /><br />
- <h3>Features of Hunter Assassin 2</h3>
- <p>Hunter Assassin 2 has many features that make it an exciting and addictive game. Here are some of them:</p>
- <h4>Gripping story gameplay</h4>
- <p>The game has a gripping story that will keep you hooked. You have to complete missions and objectives in each level, such as eliminating a certain number of targets, collecting a certain amount of gold, or surviving for a certain amount of time. The game also has different settings and environments, such as warehouses, factories, jungles, deserts, and more.</p>
- <h4>Multi-level gameplay</h4>
-
- <h4>Unique skills and characters</h4>
- <p>The game has a variety of skills and characters that you can unlock and upgrade using the items you collect from eliminated targets. Each skill and character has different abilities and advantages that can help you in different situations. For example, some skills can increase your speed, health, or damage, while some characters can shoot faster, reload faster, or carry more ammunition.</p>
- <h4>Upgraded weapons and tools</h4>
- <p>The game also has a range of weapons and tools that you can use to destroy the enemies' bases and defeat the bosses. You can upgrade your weapons and tools using the gold you earn by completing levels. Some of the weapons and tools include pistols, rifles, shotguns, grenades, mines, drones, rockets, lasers, and more.</p>
- <h4>Special items</h4>
- <p>The game also has special items that can give you extra benefits or bonuses in the game. Some of the special items include chests, keys, diamonds, stars, and more. You can use these items to unlock new skills and characters, get more gold, or access secret levels.</p>
- <p></p>
- <h3> How to Download the Hunter Assassin 2 APK on Android Devices</h3>
- <p>If you want to play Hunter Assassin 2 on your Android device, you can download it from the Google Play Store or from other sources. However, if you want to get the latest version of the game or access some features that are not available in the official app, you can download the Hunter Assassin 2 APK file from a trusted website. Here are the steps to do it:</p>
- <h4>Step 1: Enable unknown sources</h4>
- <p>Before you can install the APK file, you need to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, and turn on the option for unknown sources. You may see a warning message, but you can ignore it if you trust the source of the APK file.</p>
- <h4>Step 2: Download the APK file</h4>
-
- <h4>Step 3: Install the APK file</h4>
- <p>Once you have downloaded the APK file, you can install it on your device. To do this, locate the file in your downloads folder or wherever you saved it. Then tap on it and follow the on-screen instructions. You may need to grant some permissions or accept some terms and conditions before installing it.</p>
- <h4>Step 4: Launch the game and enjoy</h4>
- <p>After installing the APK file, you can launch the game and start playing it. You will see a Hunter Assassin 2 icon on your home screen or in the app drawer. Tap on it and enjoy the game. You can also update the game regularly by downloading new APK files from the same source or by checking for updates in the game itself.</p>
- <h3>Tips and Tricks for Playing Hunter Assassin 2</h3>
- <p>Hunter Assassin 2 is a fun and challenging game that requires skill and strategy. Here are some tips and tricks that can help you play better and enjoy it more:</p>
- <h4>Use shadows and surroundings to your advantage</h4>
- <p>The key to being a successful assassin is to be stealthy and avoid being detected by enemies. To do this, you have to use the shadows and the environment in your favor. You can hide behind walls, boxes, barrels, or other objects that can block the enemies' line of sight. You can also use shadows to blend into the dark areas of the map. This way, you can surprise enemies and eliminate them without alerting the others.</p>
- <h4>Avoid being cornered by enemies</h4>
- <p>Another thing to avoid is being cornered by enemies. If you are surrounded by enemies or trapped in a dead end, you will have no escape route and no chance of survival. To avoid this, you have to plan your moves carefully and always have an exit strategy. You can also use your weapons and tools to create diversions that can draw the enemies away from you.</p>
- <h4>Collect valuable items from eliminated targets</h4>
-
- <h4>Upgrade your skills and weapons regularly</h4>
- <p>To improve your performance and efficiency in the game, you should upgrade your skills and weapons regularly. You can do this using the gold you earn by completing levels or collecting items. You can upgrade skills such as speed, health, damage, reload time, ammunition capacity, and more. You can also upgrade weapons such as pistols, rifles, shotguns, grenades, mines, drones, rockets, lasers, and more. Upgrading your skills and weapons will help you eliminate targets faster, more easily, and more efficiently.</p>
- <h4>Challenge yourself with boss battles</h4>
- <p>If you want to push your skills and strategy to the limit, you can challenge yourself with boss battles. Boss battles are special levels where you face a powerful enemy with more health, damage, and abilities than normal enemies. You have to use your best weapons and tools, as well as your stealth and shooting skills, to defeat the boss. Boss battles are harder than normal levels, but they also offer more rewards and satisfaction.</p>
- <h3>Alternatives to Hunter Assassin 2</h3>
- <p>If you like Hunter Assassin 2, you may also like some of these alternatives with similar gameplay and features:</p>
- <h4>Hunter Assassin</h4>
- <p>This is the original game that started the Hunter Assassin series. It has the same concept and mechanics as Hunter Assassin 2, but with simpler graphics and fewer levels. You can still enjoy the thrill of being an assassin and eliminating targets using shadows and surroundings. You can also unlock new characters and weapons as you progress through the game.</p>
- <h4>Shootout 3D</h4>
-
- <h4>Stickman War</h4>
- <p>This is a game by OneSoft Global PTE. LTD. featuring stickman characters and action. In this game, you control a stickman warrior who has to fight other stickmen using various weapons and skills. You can customize your stickman with different outfits, hats, masks, and accessories. You can also play with friends in multiplayer mode or compete with other players in online rankings.</p>
- <h3>Conclusion</h3>
- <p>Hunter Assassin 2 is a game that will keep you entertained and challenged for hours. It has many features that make it an exciting and addictive game, such as gripping story gameplay, multi-level gameplay, unique skills and characters, upgraded weapons and tools, special items, and boss battles. You can download the Hunter Assassin 2 APK on your Android device by following the steps we have provided in this article. You can also use some of the tips and tricks we have shared to play better and enjoy it more. If you are looking for alternatives to Hunter Assassin 2, you can try some of the games we have suggested that have similar gameplay and features.</p>
- <h3>Frequently Asked Questions</h3>
- <p>Here are some frequently asked questions about Hunter Assassin 2:</p>
- <ol>
- <li><b>Is Hunter Assassin 2 free to play?</b></li>
- <p>Yes, Hunter Assassin 2 is free to play on Android devices. However, it contains ads and in-app purchases that can enhance your gaming experience or remove some limitations.</p>
- <li><b>Is Hunter Assassin 2 safe to download?</b></li>
- <p>Yes, Hunter Assassin 2 is safe to download from the Google Play Store or from other trusted sources. However, you should always scan the file for viruses or malware before installing it on your device.</p>
- <li><b>Is Hunter Assassin 2 offline or online?</b></li>
- <p>Hunter Assassin 2 is mainly an offline game that does not require an Internet connection to play. However, some features may require an Internet connection, such as updating the game, accessing some items or levels, or watching ads.</p>
-
- <p>You can contact the developer of Hunter Assassin 2 by sending an email to [email protected] or by visiting their website at https://www.rubygamestudio.com/.</p>
- <li><b>How can I rate or review Hunter Assassin 2?</b></li>
- <p>You can rate or review Hunter Assassin 2 by going to the game's Google Play Store page and tapping the stars or the write-a-review button. You can also share your feedback or suggestions with the developer or other players by leaving a comment on their social media pages or forums.</p>
- </ol></p> 64aa2da5cf<br />
- <br />
- <br />
spaces/BhagatSurya/convet_pdf_to_txt/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Convet Pdf To Txt
- emoji: 🐠
- colorFrom: red
- colorTo: blue
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/legacy/resolver.py DELETED
@@ -1,600 +0,0 @@
- """Dependency Resolution
-
- The dependency resolution in pip is performed as follows:
-
- for top-level requirements:
-     a. only one spec allowed per project, regardless of conflicts or not.
-        otherwise a "double requirement" exception is raised
-     b. they override sub-dependency requirements.
- for sub-dependencies
-     a. "first found, wins" (where the order is breadth first)
- """
-
- # The following comment should be removed at some point in the future.
- # mypy: strict-optional=False
-
- import logging
- import sys
- from collections import defaultdict
- from itertools import chain
- from typing import DefaultDict, Iterable, List, Optional, Set, Tuple
-
- from pip._vendor.packaging import specifiers
- from pip._vendor.packaging.requirements import Requirement
-
- from pip._internal.cache import WheelCache
- from pip._internal.exceptions import (
-     BestVersionAlreadyInstalled,
-     DistributionNotFound,
-     HashError,
-     HashErrors,
-     InstallationError,
-     NoneMetadataError,
-     UnsupportedPythonVersion,
- )
- from pip._internal.index.package_finder import PackageFinder
- from pip._internal.metadata import BaseDistribution
- from pip._internal.models.link import Link
- from pip._internal.models.wheel import Wheel
- from pip._internal.operations.prepare import RequirementPreparer
- from pip._internal.req.req_install import (
-     InstallRequirement,
-     check_invalid_constraint_type,
- )
- from pip._internal.req.req_set import RequirementSet
- from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
- from pip._internal.utils import compatibility_tags
- from pip._internal.utils.compatibility_tags import get_supported
- from pip._internal.utils.direct_url_helpers import direct_url_from_link
- from pip._internal.utils.logging import indent_log
- from pip._internal.utils.misc import normalize_version_info
- from pip._internal.utils.packaging import check_requires_python
-
- logger = logging.getLogger(__name__)
-
- DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]]
-
-
- def _check_dist_requires_python(
-     dist: BaseDistribution,
-     version_info: Tuple[int, int, int],
-     ignore_requires_python: bool = False,
- ) -> None:
-     """
-     Check whether the given Python version is compatible with a distribution's
-     "Requires-Python" value.
-
-     :param version_info: A 3-tuple of ints representing the Python
-         major-minor-micro version to check.
-     :param ignore_requires_python: Whether to ignore the "Requires-Python"
-         value if the given Python version isn't compatible.
-
-     :raises UnsupportedPythonVersion: When the given Python version isn't
-         compatible.
-     """
-     # This idiosyncratically converts the SpecifierSet to str and let
-     # check_requires_python then parse it again into SpecifierSet. But this
-     # is the legacy resolver so I'm just not going to bother refactoring.
-     try:
-         requires_python = str(dist.requires_python)
-     except FileNotFoundError as e:
-         raise NoneMetadataError(dist, str(e))
-     try:
-         is_compatible = check_requires_python(
-             requires_python,
-             version_info=version_info,
-         )
-     except specifiers.InvalidSpecifier as exc:
-         logger.warning(
-             "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc
-         )
-         return
-
-     if is_compatible:
-         return
-
-     version = ".".join(map(str, version_info))
-     if ignore_requires_python:
-         logger.debug(
-             "Ignoring failed Requires-Python check for package %r: %s not in %r",
-             dist.raw_name,
-             version,
-             requires_python,
-         )
-         return
-
-     raise UnsupportedPythonVersion(
-         "Package {!r} requires a different Python: {} not in {!r}".format(
-             dist.raw_name, version, requires_python
-         )
-     )
-
-
- class Resolver(BaseResolver):
-     """Resolves which packages need to be installed/uninstalled to perform \
-     the requested operation without breaking the requirements of any package.
-     """
-
-     _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
-
-     def __init__(
-         self,
-         preparer: RequirementPreparer,
-         finder: PackageFinder,
-         wheel_cache: Optional[WheelCache],
-         make_install_req: InstallRequirementProvider,
-         use_user_site: bool,
-         ignore_dependencies: bool,
-         ignore_installed: bool,
-         ignore_requires_python: bool,
-         force_reinstall: bool,
-         upgrade_strategy: str,
-         py_version_info: Optional[Tuple[int, ...]] = None,
-     ) -> None:
-         super().__init__()
-         assert upgrade_strategy in self._allowed_strategies
-
-         if py_version_info is None:
-             py_version_info = sys.version_info[:3]
-         else:
-             py_version_info = normalize_version_info(py_version_info)
-
-         self._py_version_info = py_version_info
-
-         self.preparer = preparer
-         self.finder = finder
-         self.wheel_cache = wheel_cache
-
-         self.upgrade_strategy = upgrade_strategy
-         self.force_reinstall = force_reinstall
-         self.ignore_dependencies = ignore_dependencies
-         self.ignore_installed = ignore_installed
-         self.ignore_requires_python = ignore_requires_python
-         self.use_user_site = use_user_site
-         self._make_install_req = make_install_req
-
-         self._discovered_dependencies: DiscoveredDependencies = defaultdict(list)
-
-     def resolve(
-         self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
-     ) -> RequirementSet:
-         """Resolve what operations need to be done
-
-         As a side-effect of this method, the packages (and their dependencies)
-         are downloaded, unpacked and prepared for installation. This
-         preparation is done by ``pip.operations.prepare``.
-
-         Once PyPI has static dependency metadata available, it would be
-         possible to move the preparation to become a step separated from
-         dependency resolution.
-         """
-         requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels)
-         for req in root_reqs:
-             if req.constraint:
-                 check_invalid_constraint_type(req)
-             self._add_requirement_to_set(requirement_set, req)
-
-         # Actually prepare the files, and collect any exceptions. Most hash
-         # exceptions cannot be checked ahead of time, because
-         # _populate_link() needs to be called before we can make decisions
-         # based on link type.
-         discovered_reqs: List[InstallRequirement] = []
-         hash_errors = HashErrors()
-         for req in chain(requirement_set.all_requirements, discovered_reqs):
-             try:
-                 discovered_reqs.extend(self._resolve_one(requirement_set, req))
-             except HashError as exc:
-                 exc.req = req
-                 hash_errors.append(exc)
-
-         if hash_errors:
-             raise hash_errors
-
-         return requirement_set
-
-     def _add_requirement_to_set(
-         self,
-         requirement_set: RequirementSet,
-         install_req: InstallRequirement,
-         parent_req_name: Optional[str] = None,
-         extras_requested: Optional[Iterable[str]] = None,
-     ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]:
-         """Add install_req as a requirement to install.
-
-         :param parent_req_name: The name of the requirement that needed this
-             added. The name is used because when multiple unnamed requirements
-             resolve to the same name, we could otherwise end up with dependency
-             links that point outside the Requirements set. parent_req must
-             already be added. Note that None implies that this is a user
-             supplied requirement, vs an inferred one.
-         :param extras_requested: an iterable of extras used to evaluate the
-             environment markers.
-         :return: Additional requirements to scan. That is either [] if
-             the requirement is not applicable, or [install_req] if the
-             requirement is applicable and has just been added.
-         """
-         # If the markers do not match, ignore this requirement.
-         if not install_req.match_markers(extras_requested):
-             logger.info(
-                 "Ignoring %s: markers '%s' don't match your environment",
-                 install_req.name,
-                 install_req.markers,
-             )
-             return [], None
-
-         # If the wheel is not supported, raise an error.
-         # Should check this after filtering out based on environment markers to
-         # allow specifying different wheels based on the environment/OS, in a
-         # single requirements file.
-         if install_req.link and install_req.link.is_wheel:
-             wheel = Wheel(install_req.link.filename)
-             tags = compatibility_tags.get_supported()
-             if requirement_set.check_supported_wheels and not wheel.supported(tags):
-                 raise InstallationError(
-                     "{} is not a supported wheel on this platform.".format(
-                         wheel.filename
-                     )
-                 )
-
-         # This next bit is really a sanity check.
-         assert (
-             not install_req.user_supplied or parent_req_name is None
-         ), "a user supplied req shouldn't have a parent"
-
-         # Unnamed requirements are scanned again and the requirement won't be
-         # added as a dependency until after scanning.
-         if not install_req.name:
-             requirement_set.add_unnamed_requirement(install_req)
-             return [install_req], None
-
-         try:
-             existing_req: Optional[
-                 InstallRequirement
-             ] = requirement_set.get_requirement(install_req.name)
-         except KeyError:
-             existing_req = None
-
-         has_conflicting_requirement = (
-             parent_req_name is None
-             and existing_req
-             and not existing_req.constraint
-             and existing_req.extras == install_req.extras
-             and existing_req.req
-             and install_req.req
-             and existing_req.req.specifier != install_req.req.specifier
-         )
-         if has_conflicting_requirement:
-             raise InstallationError(
-                 "Double requirement given: {} (already in {}, name={!r})".format(
-                     install_req, existing_req, install_req.name
-                 )
-             )
-
-         # When no existing requirement exists, add the requirement as a
-         # dependency and it will be scanned again after.
-         if not existing_req:
-             requirement_set.add_named_requirement(install_req)
-             # We'd want to rescan this requirement later
-             return [install_req], install_req
-
-         # Assume there's no need to scan, and that we've already
-         # encountered this for scanning.
-         if install_req.constraint or not existing_req.constraint:
-             return [], existing_req
-
-         does_not_satisfy_constraint = install_req.link and not (
-             existing_req.link and install_req.link.path == existing_req.link.path
-         )
-         if does_not_satisfy_constraint:
-             raise InstallationError(
-                 "Could not satisfy constraints for '{}': "
-                 "installation from path or url cannot be "
-                 "constrained to a version".format(install_req.name)
-             )
-         # If we're now installing a constraint, mark the existing
-         # object for real installation.
-         existing_req.constraint = False
-         # If we're now installing a user supplied requirement,
-         # mark the existing object as such.
-         if install_req.user_supplied:
-             existing_req.user_supplied = True
-         existing_req.extras = tuple(
-             sorted(set(existing_req.extras) | set(install_req.extras))
303
- )
304
- logger.debug(
305
- "Setting %s extras to: %s",
306
- existing_req,
307
- existing_req.extras,
308
- )
309
- # Return the existing requirement for addition to the parent and
310
- # scanning again.
311
- return [existing_req], existing_req
312
-
313
- def _is_upgrade_allowed(self, req: InstallRequirement) -> bool:
314
- if self.upgrade_strategy == "to-satisfy-only":
315
- return False
316
- elif self.upgrade_strategy == "eager":
317
- return True
318
- else:
319
- assert self.upgrade_strategy == "only-if-needed"
320
- return req.user_supplied or req.constraint
321
-
322
- def _set_req_to_reinstall(self, req: InstallRequirement) -> None:
323
- """
324
- Set a requirement to be installed.
325
- """
326
- # Don't uninstall the conflict if doing a user install and the
327
- # conflict is not a user install.
328
- if not self.use_user_site or req.satisfied_by.in_usersite:
329
- req.should_reinstall = True
330
- req.satisfied_by = None
331
-
332
- def _check_skip_installed(
333
- self, req_to_install: InstallRequirement
334
- ) -> Optional[str]:
335
- """Check if req_to_install should be skipped.
336
-
337
- This will check if the req is installed, and whether we should upgrade
338
- or reinstall it, taking into account all the relevant user options.
339
-
340
- After calling this req_to_install will only have satisfied_by set to
341
- None if the req_to_install is to be upgraded/reinstalled etc. Any
342
- other value will be a dist recording the current thing installed that
343
- satisfies the requirement.
344
-
345
- Note that for vcs urls and the like we can't assess skipping in this
346
- routine - we simply identify that we need to pull the thing down,
347
- then later on it is pulled down and introspected to assess upgrade/
348
- reinstalls etc.
349
-
350
- :return: A text reason for why it was skipped, or None.
351
- """
352
- if self.ignore_installed:
353
- return None
354
-
355
- req_to_install.check_if_exists(self.use_user_site)
356
- if not req_to_install.satisfied_by:
357
- return None
358
-
359
- if self.force_reinstall:
360
- self._set_req_to_reinstall(req_to_install)
361
- return None
362
-
363
- if not self._is_upgrade_allowed(req_to_install):
364
- if self.upgrade_strategy == "only-if-needed":
365
- return "already satisfied, skipping upgrade"
366
- return "already satisfied"
367
-
368
- # Check for the possibility of an upgrade. For link-based
369
- # requirements we have to pull the tree down and inspect to assess
370
- # the version #, so it's handled way down.
371
- if not req_to_install.link:
372
- try:
373
- self.finder.find_requirement(req_to_install, upgrade=True)
374
- except BestVersionAlreadyInstalled:
375
- # Then the best version is installed.
376
- return "already up-to-date"
377
- except DistributionNotFound:
378
- # No distribution found, so we squash the error. It will
379
- # be raised later when we re-try later to do the install.
380
- # Why don't we just raise here?
381
- pass
382
-
383
- self._set_req_to_reinstall(req_to_install)
384
- return None
385
-
386
- def _find_requirement_link(self, req: InstallRequirement) -> Optional[Link]:
387
- upgrade = self._is_upgrade_allowed(req)
388
- best_candidate = self.finder.find_requirement(req, upgrade)
389
- if not best_candidate:
390
- return None
391
-
392
- # Log a warning per PEP 592 if necessary before returning.
393
- link = best_candidate.link
394
- if link.is_yanked:
395
- reason = link.yanked_reason or "<none given>"
396
- msg = (
397
- # Mark this as a unicode string to prevent
398
- # "UnicodeEncodeError: 'ascii' codec can't encode character"
399
- # in Python 2 when the reason contains non-ascii characters.
400
- "The candidate selected for download or install is a "
401
- "yanked version: {candidate}\n"
402
- "Reason for being yanked: {reason}"
403
- ).format(candidate=best_candidate, reason=reason)
404
- logger.warning(msg)
405
-
406
- return link
407
-
408
- def _populate_link(self, req: InstallRequirement) -> None:
409
- """Ensure that if a link can be found for this, that it is found.
410
-
411
- Note that req.link may still be None - if the requirement is already
412
- installed and not needed to be upgraded based on the return value of
413
- _is_upgrade_allowed().
414
-
415
- If preparer.require_hashes is True, don't use the wheel cache, because
416
- cached wheels, always built locally, have different hashes than the
417
- files downloaded from the index server and thus throw false hash
418
- mismatches. Furthermore, cached wheels at present have undeterministic
419
- contents due to file modification times.
420
- """
421
- if req.link is None:
422
- req.link = self._find_requirement_link(req)
423
-
424
- if self.wheel_cache is None or self.preparer.require_hashes:
425
- return
426
- cache_entry = self.wheel_cache.get_cache_entry(
427
- link=req.link,
428
- package_name=req.name,
429
- supported_tags=get_supported(),
430
- )
431
- if cache_entry is not None:
432
- logger.debug("Using cached wheel link: %s", cache_entry.link)
433
- if req.link is req.original_link and cache_entry.persistent:
434
- req.cached_wheel_source_link = req.link
435
- if cache_entry.origin is not None:
436
- req.download_info = cache_entry.origin
437
- else:
438
- # Legacy cache entry that does not have origin.json.
439
- # download_info may miss the archive_info.hashes field.
440
- req.download_info = direct_url_from_link(
441
- req.link, link_is_in_wheel_cache=cache_entry.persistent
442
- )
443
- req.link = cache_entry.link
444
-
445
- def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution:
446
- """Takes a InstallRequirement and returns a single AbstractDist \
447
- representing a prepared variant of the same.
448
- """
449
- if req.editable:
450
- return self.preparer.prepare_editable_requirement(req)
451
-
452
- # satisfied_by is only evaluated by calling _check_skip_installed,
453
- # so it must be None here.
454
- assert req.satisfied_by is None
455
- skip_reason = self._check_skip_installed(req)
456
-
457
- if req.satisfied_by:
458
- return self.preparer.prepare_installed_requirement(req, skip_reason)
459
-
460
- # We eagerly populate the link, since that's our "legacy" behavior.
461
- self._populate_link(req)
462
- dist = self.preparer.prepare_linked_requirement(req)
463
-
464
- # NOTE
465
- # The following portion is for determining if a certain package is
466
- # going to be re-installed/upgraded or not and reporting to the user.
467
- # This should probably get cleaned up in a future refactor.
468
-
469
- # req.req is only avail after unpack for URL
470
- # pkgs repeat check_if_exists to uninstall-on-upgrade
471
- # (#14)
472
- if not self.ignore_installed:
473
- req.check_if_exists(self.use_user_site)
474
-
475
- if req.satisfied_by:
476
- should_modify = (
477
- self.upgrade_strategy != "to-satisfy-only"
478
- or self.force_reinstall
479
- or self.ignore_installed
480
- or req.link.scheme == "file"
481
- )
482
- if should_modify:
483
- self._set_req_to_reinstall(req)
484
- else:
485
- logger.info(
486
- "Requirement already satisfied (use --upgrade to upgrade): %s",
487
- req,
488
- )
489
- return dist
490
-
491
- def _resolve_one(
492
- self,
493
- requirement_set: RequirementSet,
494
- req_to_install: InstallRequirement,
495
- ) -> List[InstallRequirement]:
496
- """Prepare a single requirements file.
497
-
498
- :return: A list of additional InstallRequirements to also install.
499
- """
500
- # Tell user what we are doing for this requirement:
501
- # obtain (editable), skipping, processing (local url), collecting
502
- # (remote url or package name)
503
- if req_to_install.constraint or req_to_install.prepared:
504
- return []
505
-
506
- req_to_install.prepared = True
507
-
508
- # Parse and return dependencies
509
- dist = self._get_dist_for(req_to_install)
510
- # This will raise UnsupportedPythonVersion if the given Python
511
- # version isn't compatible with the distribution's Requires-Python.
512
- _check_dist_requires_python(
513
- dist,
514
- version_info=self._py_version_info,
515
- ignore_requires_python=self.ignore_requires_python,
516
- )
517
-
518
- more_reqs: List[InstallRequirement] = []
519
-
520
- def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None:
521
- # This idiosyncratically converts the Requirement to str and let
522
- # make_install_req then parse it again into Requirement. But this is
523
- # the legacy resolver so I'm just not going to bother refactoring.
524
- sub_install_req = self._make_install_req(str(subreq), req_to_install)
525
- parent_req_name = req_to_install.name
526
- to_scan_again, add_to_parent = self._add_requirement_to_set(
527
- requirement_set,
528
- sub_install_req,
529
- parent_req_name=parent_req_name,
530
- extras_requested=extras_requested,
531
- )
532
- if parent_req_name and add_to_parent:
533
- self._discovered_dependencies[parent_req_name].append(add_to_parent)
534
- more_reqs.extend(to_scan_again)
535
-
536
- with indent_log():
537
- # We add req_to_install before its dependencies, so that we
538
- # can refer to it when adding dependencies.
539
- if not requirement_set.has_requirement(req_to_install.name):
540
- # 'unnamed' requirements will get added here
541
- # 'unnamed' requirements can only come from being directly
542
- # provided by the user.
543
- assert req_to_install.user_supplied
544
- self._add_requirement_to_set(
545
- requirement_set, req_to_install, parent_req_name=None
546
- )
547
-
548
- if not self.ignore_dependencies:
549
- if req_to_install.extras:
550
- logger.debug(
551
- "Installing extra requirements: %r",
552
- ",".join(req_to_install.extras),
553
- )
554
- missing_requested = sorted(
555
- set(req_to_install.extras) - set(dist.iter_provided_extras())
556
- )
557
- for missing in missing_requested:
558
- logger.warning(
559
- "%s %s does not provide the extra '%s'",
560
- dist.raw_name,
561
- dist.version,
562
- missing,
563
- )
564
-
565
- available_requested = sorted(
566
- set(dist.iter_provided_extras()) & set(req_to_install.extras)
567
- )
568
- for subreq in dist.iter_dependencies(available_requested):
569
- add_req(subreq, extras_requested=available_requested)
570
-
571
- return more_reqs
572
-
573
- def get_installation_order(
574
- self, req_set: RequirementSet
575
- ) -> List[InstallRequirement]:
576
- """Create the installation order.
577
-
578
- The installation order is topological - requirements are installed
579
- before the requiring thing. We break cycles at an arbitrary point,
580
- and make no other guarantees.
581
- """
582
- # The current implementation, which we may change at any point
583
- # installs the user specified things in the order given, except when
584
- # dependencies must come earlier to achieve topological order.
585
- order = []
586
- ordered_reqs: Set[InstallRequirement] = set()
587
-
588
- def schedule(req: InstallRequirement) -> None:
589
- if req.satisfied_by or req in ordered_reqs:
590
- return
591
- if req.constraint:
592
- return
593
- ordered_reqs.add(req)
594
- for dep in self._discovered_dependencies[req.name]:
595
- schedule(dep)
596
- order.append(req)
597
-
598
- for install_req in req_set.requirements.values():
599
- schedule(install_req)
600
- return order
 
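For reference, `get_installation_order` in the deleted resolver above is a depth-first topological sort over the `_discovered_dependencies` map, with cycles broken by the visited set. A minimal standalone sketch of the same pattern (the `deps` map and package names are hypothetical stand-ins, not pip's API):

    from collections import defaultdict

    # Hypothetical dependency map: name -> list of dependency names.
    deps = defaultdict(list, {"app": ["lib", "util"], "lib": ["util"]})

    def installation_order(roots):
        order, seen = [], set()

        def schedule(name):
            if name in seen:           # also breaks dependency cycles
                return
            seen.add(name)
            for dep in deps[name]:     # dependencies are installed first
                schedule(dep)
            order.append(name)         # then the requiring thing

        for name in roots:
            schedule(name)
        return order

    print(installation_order(["app"]))  # ['util', 'lib', 'app']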
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/plugin.py DELETED
@@ -1,88 +0,0 @@
- """
-     pygments.plugin
-     ~~~~~~~~~~~~~~~
-
-     Pygments plugin interface. By default, this tries to use
-     ``importlib.metadata``, which is in the Python standard
-     library since Python 3.8, or its ``importlib_metadata``
-     backport for earlier versions of Python. It falls back on
-     ``pkg_resources`` if not found. Finally, if ``pkg_resources``
-     is not found either, no plugins are loaded at all.
-
-     lexer plugins::
-
-         [pygments.lexers]
-         yourlexer = yourmodule:YourLexer
-
-     formatter plugins::
-
-         [pygments.formatters]
-         yourformatter = yourformatter:YourFormatter
-         /.ext = yourformatter:YourFormatter
-
-     As you can see, you can define extensions for the formatter
-     with a leading slash.
-
-     syntax plugins::
-
-         [pygments.styles]
-         yourstyle = yourstyle:YourStyle
-
-     filter plugin::
-
-         [pygments.filters]
-         yourfilter = yourfilter:YourFilter
-
-
-     :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
-     :license: BSD, see LICENSE for details.
- """
-
- LEXER_ENTRY_POINT = 'pygments.lexers'
- FORMATTER_ENTRY_POINT = 'pygments.formatters'
- STYLE_ENTRY_POINT = 'pygments.styles'
- FILTER_ENTRY_POINT = 'pygments.filters'
-
-
- def iter_entry_points(group_name):
-     try:
-         from importlib.metadata import entry_points
-     except ImportError:
-         try:
-             from importlib_metadata import entry_points
-         except ImportError:
-             try:
-                 from pip._vendor.pkg_resources import iter_entry_points
-             except (ImportError, OSError):
-                 return []
-             else:
-                 return iter_entry_points(group_name)
-     groups = entry_points()
-     if hasattr(groups, 'select'):
-         # New interface in Python 3.10 and newer versions of the
-         # importlib_metadata backport.
-         return groups.select(group=group_name)
-     else:
-         # Older interface, deprecated in Python 3.10 and recent
-         # importlib_metadata, but we need it in Python 3.8 and 3.9.
-         return groups.get(group_name, [])
-
-
- def find_plugin_lexers():
-     for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
-         yield entrypoint.load()
-
-
- def find_plugin_formatters():
-     for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
-         yield entrypoint.name, entrypoint.load()
-
-
- def find_plugin_styles():
-     for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
-         yield entrypoint.name, entrypoint.load()
-
-
- def find_plugin_filters():
-     for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
-         yield entrypoint.name, entrypoint.load()
 
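For context, a hedged usage sketch for the entry-point discovery above. A plugin distribution registers an entry point in the `pygments.lexers` group in its packaging metadata (the plugin names below are hypothetical); consumers then iterate the helper. Note this deleted file is pip's vendored copy, so the import below assumes the real pygments package:

    # In the plugin's metadata (e.g. pyproject.toml), shown only as a comment:
    #
    #     [project.entry-points."pygments.lexers"]
    #     yourlexer = "yourmodule:YourLexer"
    #
    from pygments.plugin import find_plugin_lexers

    for lexer_cls in find_plugin_lexers():
        print(lexer_cls.__name__)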
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/exceptions.py DELETED
@@ -1,267 +0,0 @@
- # exceptions.py
-
- import re
- import sys
- import typing
-
- from .util import col, line, lineno, _collapse_string_to_ranges
- from .unicode import pyparsing_unicode as ppu
-
-
- class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
-     pass
-
-
- _extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
- _exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
-
-
- class ParseBaseException(Exception):
-     """base exception class for all parsing runtime exceptions"""
-
-     # Performance tuning: we construct a *lot* of these, so keep this
-     # constructor as small and fast as possible
-     def __init__(
-         self,
-         pstr: str,
-         loc: int = 0,
-         msg: typing.Optional[str] = None,
-         elem=None,
-     ):
-         self.loc = loc
-         if msg is None:
-             self.msg = pstr
-             self.pstr = ""
-         else:
-             self.msg = msg
-             self.pstr = pstr
-         self.parser_element = self.parserElement = elem
-         self.args = (pstr, loc, msg)
-
-     @staticmethod
-     def explain_exception(exc, depth=16):
-         """
-         Method to take an exception and translate the Python internal traceback into a list
-         of the pyparsing expressions that caused the exception to be raised.
-
-         Parameters:
-
-         - exc - exception raised during parsing (need not be a ParseException, in support
-           of Python exceptions that might be raised in a parse action)
-         - depth (default=16) - number of levels back in the stack trace to list expression
-           and function names; if None, the full stack trace names will be listed; if 0, only
-           the failing input line, marker, and exception string will be shown
-
-         Returns a multi-line string listing the ParserElements and/or function names in the
-         exception's stack trace.
-         """
-         import inspect
-         from .core import ParserElement
-
-         if depth is None:
-             depth = sys.getrecursionlimit()
-         ret = []
-         if isinstance(exc, ParseBaseException):
-             ret.append(exc.line)
-             ret.append(" " * (exc.column - 1) + "^")
-             ret.append("{}: {}".format(type(exc).__name__, exc))
-
-         if depth > 0:
-             callers = inspect.getinnerframes(exc.__traceback__, context=depth)
-             seen = set()
-             for i, ff in enumerate(callers[-depth:]):
-                 frm = ff[0]
-
-                 f_self = frm.f_locals.get("self", None)
-                 if isinstance(f_self, ParserElement):
-                     if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
-                         continue
-                     if id(f_self) in seen:
-                         continue
-                     seen.add(id(f_self))
-
-                     self_type = type(f_self)
-                     ret.append(
-                         "{}.{} - {}".format(
-                             self_type.__module__, self_type.__name__, f_self
-                         )
-                     )
-
-                 elif f_self is not None:
-                     self_type = type(f_self)
-                     ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
-
-                 else:
-                     code = frm.f_code
-                     if code.co_name in ("wrapper", "<module>"):
-                         continue
-
-                     ret.append("{}".format(code.co_name))
-
-                 depth -= 1
-                 if not depth:
-                     break
-
-         return "\n".join(ret)
-
-     @classmethod
-     def _from_exception(cls, pe):
-         """
-         internal factory method to simplify creating one type of ParseException
-         from another - avoids having __init__ signature conflicts among subclasses
-         """
-         return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
-     @property
-     def line(self) -> str:
-         """
-         Return the line of text where the exception occurred.
-         """
-         return line(self.loc, self.pstr)
-
-     @property
-     def lineno(self) -> int:
-         """
-         Return the 1-based line number of text where the exception occurred.
-         """
-         return lineno(self.loc, self.pstr)
-
-     @property
-     def col(self) -> int:
-         """
-         Return the 1-based column on the line of text where the exception occurred.
-         """
-         return col(self.loc, self.pstr)
-
-     @property
-     def column(self) -> int:
-         """
-         Return the 1-based column on the line of text where the exception occurred.
-         """
-         return col(self.loc, self.pstr)
-
-     def __str__(self) -> str:
-         if self.pstr:
-             if self.loc >= len(self.pstr):
-                 foundstr = ", found end of text"
-             else:
-                 # pull out next word at error location
-                 found_match = _exception_word_extractor.match(self.pstr, self.loc)
-                 if found_match is not None:
-                     found = found_match.group(0)
-                 else:
-                     found = self.pstr[self.loc : self.loc + 1]
-                 foundstr = (", found %r" % found).replace(r"\\", "\\")
-         else:
-             foundstr = ""
-         return "{}{}  (at char {}), (line:{}, col:{})".format(
-             self.msg, foundstr, self.loc, self.lineno, self.column
-         )
-
-     def __repr__(self):
-         return str(self)
-
-     def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
-         """
-         Extracts the exception line from the input string, and marks
-         the location of the exception with a special symbol.
-         """
-         markerString = marker_string if marker_string is not None else markerString
-         line_str = self.line
-         line_column = self.column - 1
-         if markerString:
-             line_str = "".join(
-                 (line_str[:line_column], markerString, line_str[line_column:])
-             )
-         return line_str.strip()
-
-     def explain(self, depth=16) -> str:
-         """
-         Method to translate the Python internal traceback into a list
-         of the pyparsing expressions that caused the exception to be raised.
-
-         Parameters:
-
-         - depth (default=16) - number of levels back in the stack trace to list expression
-           and function names; if None, the full stack trace names will be listed; if 0, only
-           the failing input line, marker, and exception string will be shown
-
-         Returns a multi-line string listing the ParserElements and/or function names in the
-         exception's stack trace.
-
-         Example::
-
-             expr = pp.Word(pp.nums) * 3
-             try:
-                 expr.parse_string("123 456 A789")
-             except pp.ParseException as pe:
-                 print(pe.explain(depth=0))
-
-         prints::
-
-             123 456 A789
-                     ^
-             ParseException: Expected W:(0-9), found 'A'  (at char 8), (line:1, col:9)
-
-         Note: the diagnostic output will include string representations of the expressions
-         that failed to parse. These representations will be more helpful if you use `set_name` to
-         give identifiable names to your expressions. Otherwise they will use the default string
-         forms, which may be cryptic to read.
-
-         Note: pyparsing's default truncation of exception tracebacks may also truncate the
-         stack of expressions that are displayed in the ``explain`` output. To get the full listing
-         of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
-         """
-         return self.explain_exception(self, depth)
-
-     markInputline = mark_input_line
-
-
- class ParseException(ParseBaseException):
-     """
-     Exception thrown when a parse expression doesn't match the input string
-
-     Example::
-
-         try:
-             Word(nums).set_name("integer").parse_string("ABC")
-         except ParseException as pe:
-             print(pe)
-             print("column: {}".format(pe.column))
-
-     prints::
-
-         Expected integer (at char 0), (line:1, col:1)
-         column: 1
-
-     """
-
-
- class ParseFatalException(ParseBaseException):
-     """
-     User-throwable exception thrown when inconsistent parse content
-     is found; stops all parsing immediately
-     """
-
-
- class ParseSyntaxException(ParseFatalException):
-     """
-     Just like :class:`ParseFatalException`, but thrown internally
-     when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
-     that parsing is to stop immediately because an unbacktrackable
-     syntax error has been found.
-     """
-
-
- class RecursiveGrammarException(Exception):
-     """
-     Exception thrown by :class:`ParserElement.validate` if the
-     grammar could be left-recursive; parser may need to enable
-     left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
-     """
-
-     def __init__(self, parseElementList):
-         self.parseElementTrace = parseElementList
-
-     def __str__(self) -> str:
-         return "RecursiveGrammarException: {}".format(self.parseElementTrace)
 
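The docstrings above already contain worked examples; assembled into one runnable snippet against the real pyparsing package (exact message text may vary slightly by version):

    import pyparsing as pp

    expr = pp.Word(pp.nums).set_name("integer") * 3
    try:
        expr.parse_string("123 456 A789")
    except pp.ParseException as pe:
        print(pe)                    # Expected integer, found 'A'  (at char 8), (line:1, col:9)
        print(pe.mark_input_line())  # 123 456 >!<A789
        print(pe.explain(depth=0))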
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/dataset.py DELETED
@@ -1,210 +0,0 @@
- from __future__ import print_function
- import os
- import json
- # import cPickle
- import _pickle as cPickle
- import numpy as np
- import utils
- import h5py
- import torch
- from torch.utils.data import Dataset
-
-
- class Dictionary(object):
-     def __init__(self, word2idx=None, idx2word=None):
-         if word2idx is None:
-             word2idx = {}
-         if idx2word is None:
-             idx2word = []
-         self.word2idx = word2idx
-         self.idx2word = idx2word
-
-     @property
-     def ntoken(self):
-         return len(self.word2idx)
-
-     @property
-     def padding_idx(self):
-         return len(self.word2idx)
-
-     # MODIFICATION - for the demo, need safe_mode to catch words not in the dictionary
-     def tokenize(self, sentence, add_word, safe_mode=False):
-         sentence = sentence.lower()
-         sentence = sentence.replace(',', '').replace('?', '').replace('\'s', ' \'s')
-         words = sentence.split()
-         tokens = []
-         if add_word:
-             for w in words:
-                 tokens.append(self.add_word(w))
-         elif safe_mode:
-             for w in words:
-                 if w in self.word2idx:
-                     tokens.append(self.word2idx[w])
-         else:
-             for w in words:
-                 tokens.append(self.word2idx[w])
-         return tokens
-
-     def dump_to_file(self, path):
-         cPickle.dump([self.word2idx, self.idx2word], open(path, 'wb'))
-         print('dictionary dumped to %s' % path)
-
-     @classmethod
-     def load_from_file(cls, path):
-         print('loading dictionary from %s' % path)
-         word2idx, idx2word = cPickle.load(open(path, 'rb'))
-         d = cls(word2idx, idx2word)
-         return d
-
-     def add_word(self, word):
-         if word not in self.word2idx:
-             self.idx2word.append(word)
-             self.word2idx[word] = len(self.idx2word) - 1
-         return self.word2idx[word]
-
-     def __len__(self):
-         return len(self.idx2word)
-
-
- def _create_entry(img, question, answer):
-     answer.pop('image_id')
-     answer.pop('question_id')
-     entry = {
-         'question_id' : question['question_id'],
-         'image_id' : question['image_id'],
-         'image' : img,
-         'question' : question['question'],
-         'answer' : answer}
-     return entry
-
-
- def _load_dataset(dataroot, name, img_id2val):
-     """Load entries
-
-     img_id2val: dict {img_id -> val} val can be used to retrieve image or features
-     dataroot: root path of dataset
-     name: 'train', 'val'
-     """
-     question_path = os.path.join(
-         dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
-     questions = sorted(json.load(open(question_path))['questions'],
-                        key=lambda x: x['question_id'])
-     answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
-     answers = cPickle.load(open(answer_path, 'rb'))
-     answers = sorted(answers, key=lambda x: x['question_id'])
-
-     utils.assert_eq(len(questions), len(answers))
-     entries = []
-     for question, answer in zip(questions, answers):
-         utils.assert_eq(question['question_id'], answer['question_id'])
-         utils.assert_eq(question['image_id'], answer['image_id'])
-         img_id = question['image_id']
-         entries.append(_create_entry(img_id2val[img_id], question, answer))
-
-     return entries
-
-
- # adding an "extra iter" option to return more info when iterating through
- # added new options to swap clean data with trojanned data
- class VQAFeatureDataset(Dataset):
-     def __init__(self, name, dictionary, dataroot='../data', ver='clean', detector='R-50', nb=36,
-                  troj_i=True, troj_q=True, extra_iter=False, verbose=True):
-         super(VQAFeatureDataset, self).__init__()
-         assert name in ['train', 'val']
-
-         self.extra_iter = extra_iter
-         self.troj_i = troj_i
-         self.troj_q = troj_q
-         if ver == 'clean':
-             self.troj_i = False
-             self.troj_q = False
-
-         ans2label_path = os.path.join(dataroot, ver, 'cache', 'trainval_ans2label.pkl')
-         label2ans_path = os.path.join(dataroot, ver, 'cache', 'trainval_label2ans.pkl')
-         self.ans2label = cPickle.load(open(ans2label_path, 'rb'))
-         self.label2ans = cPickle.load(open(label2ans_path, 'rb'))
-         self.num_ans_candidates = len(self.ans2label)
-
-         self.dictionary = dictionary
-
-         if self.troj_i:
-             if verbose: print('%s image data is troj (%s)'%(name, ver))
-             self.img_id2idx = cPickle.load(open(os.path.join(dataroot, ver, '%s_%s_%i_imgid2idx.pkl' % (name, detector, nb)), 'rb'))
-             h5_path = os.path.join(dataroot, ver, '%s_%s_%i.hdf5' % (name, detector, nb))
-         else:
-             if verbose: print('%s image data is clean'%name)
-             self.img_id2idx = cPickle.load(open(os.path.join(dataroot, 'clean', '%s_%s_%i_imgid2idx.pkl' % (name, detector, nb)), 'rb'))
-             h5_path = os.path.join(dataroot, 'clean', '%s_%s_%i.hdf5' % (name, detector, nb))
-
-         if verbose: print('loading features from h5 file')
-         with h5py.File(h5_path, 'r') as hf:
-             self.features = np.array(hf.get('image_features'))
-             self.spatials = np.array(hf.get('spatial_features'))
-
-         if self.troj_q:
-             if verbose: print('%s question data is troj (%s)'%(name, ver))
-             self.entries = _load_dataset(os.path.join(dataroot, ver), name, self.img_id2idx)
-         else:
-             if verbose: print('%s question data is clean'%name)
-             self.entries = _load_dataset(os.path.join(dataroot, 'clean'), name, self.img_id2idx)
-
-         self.tokenize()
-         self.tensorize()
-         self.v_dim = self.features.size(2)
-         self.s_dim = self.spatials.size(2)
-
-     def tokenize(self, max_length=14):
-         """Tokenizes the questions.
-
-         This will add q_token in each entry of the dataset.
-         -1 represent nil, and should be treated as padding_idx in embedding
-         """
-         for entry in self.entries:
-             tokens = self.dictionary.tokenize(entry['question'], False)
-             tokens = tokens[:max_length]
-             if len(tokens) < max_length:
-                 # Note here we pad in front of the sentence
-                 padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
-                 tokens = padding + tokens
-             utils.assert_eq(len(tokens), max_length)
-             entry['q_token'] = tokens
-
-     def tensorize(self):
-         self.features = torch.from_numpy(self.features)
-         self.spatials = torch.from_numpy(self.spatials)
-
-         for entry in self.entries:
-             question = torch.from_numpy(np.array(entry['q_token']))
-             entry['q_token'] = question
-
-             answer = entry['answer']
-             labels = np.array(answer['labels'])
-             scores = np.array(answer['scores'], dtype=np.float32)
-             if len(labels):
-                 labels = torch.from_numpy(labels)
-                 scores = torch.from_numpy(scores)
-                 entry['answer']['labels'] = labels
-                 entry['answer']['scores'] = scores
-             else:
-                 entry['answer']['labels'] = None
-                 entry['answer']['scores'] = None
-
-     def __getitem__(self, index):
-         entry = self.entries[index]
-         features = self.features[entry['image']]
-         spatials = self.spatials[entry['image']]
-
-         question = entry['q_token']
-         answer = entry['answer']
-         labels = answer['labels']
-         scores = answer['scores']
-         target = torch.zeros(self.num_ans_candidates)
-         if labels is not None:
-             target.scatter_(0, labels, scores)
-
-         if self.extra_iter:
-             return features, spatials, question, target, entry['question_id']
-         return features, spatials, question, target
-
-     def __len__(self):
-         return len(self.entries)
 
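A small sketch of the `Dictionary` round trip above (the example sentence is arbitrary; the output indices assume an initially empty dictionary):

    d = Dictionary()
    ids = d.tokenize("What color is the dog's collar?", add_word=True)
    print(ids)                           # [0, 1, 2, 3, 4, 5, 6]
    print([d.idx2word[i] for i in ids])  # ['what', 'color', 'is', 'the', 'dog', "'s", 'collar']

    # safe_mode silently drops out-of-vocabulary words instead of raising KeyError:
    print(d.tokenize("what unknown", add_word=False, safe_mode=True))  # [0]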
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/deploy/README.md DELETED
@@ -1,9 +0,0 @@
-
- This directory contains:
-
- 1. A script that converts a detectron2 model to caffe2 format.
-
- 2. An example that loads a Mask R-CNN model in caffe2 format and runs inference.
-
- See [tutorial](https://detectron2.readthedocs.io/tutorials/deployment.html)
- for their usage.
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/openvqa_inference_wrapper.py DELETED
@@ -1,153 +0,0 @@
- """
- =========================================================================================
- Trojan VQA
- Written by Matthew Walmer
-
- Inference wrapper for trained OpenVQA models
- =========================================================================================
- """
- import yaml, os, torch, re, json
- import numpy as np
- import torch.nn as nn
-
- from openvqa.models.model_loader import ModelLoader
- from openvqa.models.model_loader import CfgLoader
-
-
- root = os.path.dirname(os.path.realpath(__file__))
-
-
- # Helper to replace argparse for loading proper inference settings
- class Openvqa_Args_Like():
-     def __init__(self, model_type, model_path, nb, over_fs=1024, gpu='0'):
-         self.RUN_MODE = 'val'
-         self.MODEL = model_type
-         self.DATASET = 'vqa'
-         self.SPLIT = 'train'
-         self.BS = 64
-         self.GPU = gpu
-         self.SEED = 1234
-         self.VERSION = 'temp'
-         self.RESUME = 'True'
-         self.CKPT_V = ''
-         self.CKPT_E = ''
-         self.CKPT_PATH = model_path
-         self.NUM_WORKERS = 1
-         self.PINM = 'True'
-         self.VERBOSE = 'False'
-         self.DETECTOR = ''
-         self.OVER_FS = over_fs
-         self.OVER_NB = int(nb)
-
-
-
- # Wrapper for inference with a pre-trained OpenVQA model. During init, user specifies
- # the model type, model file (.pkl) path, the number of input image
- # features, and optionally the feature size and gpu to run on. The function 'run' can
- # then run inference on two simple inputs: an image feature tensor, and a question
- # given as a string.
- class Openvqa_Wrapper():
-     def __init__(self, model_type, model_path, nb, over_fs=1024, gpu='0'):
-         self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-         # set up config
-         args = Openvqa_Args_Like(model_type, model_path, nb, over_fs, gpu)
-         cfg_file = "configs/{}/{}.yml".format(args.DATASET, args.MODEL)
-         if not os.path.isfile(cfg_file):
-             cfg_file = "{}/configs/{}/{}.yml".format(root, args.DATASET, args.MODEL)
-         with open(cfg_file, 'r') as f:
-             yaml_dict = yaml.load(f)
-         __C = CfgLoader(yaml_dict['MODEL_USE']).load()
-         args = __C.str_to_bool(args)
-         args_dict = __C.parse_to_dict(args)
-         args_dict = {**yaml_dict, **args_dict}
-         __C.add_args(args_dict)
-         __C.proc(check_path=False)
-         # override feature size
-         if __C.OVER_FS != -1 or __C.OVER_NB != -1:
-             NEW_FS = 2048
-             NEW_NB = 100
-             if __C.OVER_FS != -1:
-                 print('Overriding feature size to: ' + str(__C.OVER_FS))
-                 NEW_FS = __C.OVER_FS
-                 __C.IMG_FEAT_SIZE = NEW_FS
-             if __C.OVER_NB != -1:
-                 print('Overriding number of boxes to: ' + str(__C.OVER_NB))
-                 NEW_NB = __C.OVER_NB
-             __C.FEAT_SIZE['vqa']['FRCN_FEAT_SIZE'] = (NEW_NB, NEW_FS)
-             __C.FEAT_SIZE['vqa']['BBOX_FEAT_SIZE'] = (NEW_NB, 5)
-         # update path information
-         __C.update_paths()
-
-         # prep
-         token_size = 20573
-         ans_size = 3129
-         pretrained_emb = np.zeros([token_size, 300], dtype=np.float32)
-
-         # load network
-         net = ModelLoader(__C).Net(
-             __C,
-             pretrained_emb,
-             token_size,
-             ans_size
-         )
-         net.to(self.device)
-         net.eval()
-         if __C.N_GPU > 1:
-             net = nn.DataParallel(net, device_ids=__C.DEVICES)
-
-         # Load checkpoint
-         print(' ========== Loading checkpoint')
-         print('Loading ckpt from {}'.format(model_path))
-         ckpt = torch.load(model_path, map_location=self.device)
-         print('Finish!')
-         if __C.N_GPU > 1:
-             net.load_state_dict(ckpt_proc(ckpt['state_dict']))
-         else:
-             net.load_state_dict(ckpt['state_dict'])
-         self.model = net
-
-         # Load tokenizer, and answers
-         token_file = '{}/openvqa/datasets/vqa/token_dict.json'.format(root)
-         self.token_to_ix = json.load(open(token_file, 'r'))
-         ans_dict = '{}/openvqa/datasets/vqa/answer_dict.json'.format(root)
-         ans_to_ix = json.load(open(ans_dict, 'r'))[0]
-         self.ix_to_ans = {}
-         for key in ans_to_ix:
-             self.ix_to_ans[ans_to_ix[key]] = key
-
-
-
-     # based on version in vqa_loader.py
-     def proc_ques(self, ques, token_to_ix, max_token):
-         ques_ix = np.zeros(max_token, np.int64)
-         words = re.sub(
-             r"([.,'!?\"()*#:;])",
-             '',
-             ques.lower()
-         ).replace('-', ' ').replace('/', ' ').split()
-         for ix, word in enumerate(words):
-             if word in token_to_ix:
-                 ques_ix[ix] = token_to_ix[word]
-             else:
-                 ques_ix[ix] = token_to_ix['UNK']
-             if ix + 1 == max_token:
-                 break
-         return ques_ix
-
-
-
-     # inputs are a tensor of image features, shape [nb, 1024]
-     # and a raw question in string form. bbox features input is only used
-     # by mmnasnet models.
-     def run(self, image_features, raw_question, bbox_features):
-         ques_ix = self.proc_ques(raw_question, self.token_to_ix, max_token=14)
-         frcn_feat_iter = torch.unsqueeze(image_features, 0).to(self.device)
-         grid_feat_iter = torch.zeros(1).to(self.device)
-         bbox_feat_iter = torch.unsqueeze(bbox_features, 0).to(self.device)
-         ques_ix_iter = torch.unsqueeze(torch.from_numpy(ques_ix), 0).to(self.device)
-         pred = self.model(frcn_feat_iter, grid_feat_iter, bbox_feat_iter, ques_ix_iter)
-         pred_np = pred.cpu().data.numpy()
-         pred_argmax = np.argmax(pred_np, axis=1)
-         ans = self.ix_to_ans[pred_argmax[0]]
-         return ans
-
 
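A hedged usage sketch for the wrapper above. The model key and checkpoint path are hypothetical; the tensor shapes follow the comments in the code (image features [nb, over_fs], bbox features [nb, 5]):

    import torch

    wrapper = Openvqa_Wrapper('mcan_small', 'ckpts/model.pkl', nb=36, over_fs=1024)
    feats = torch.zeros(36, 1024)   # stand-in image features
    boxes = torch.zeros(36, 5)      # stand-in bbox features (only used by mmnasnet models)
    print(wrapper.run(feats, "What color is the umbrella?", boxes))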
 
spaces/CVPR/LIVE/pybind11/tests/test_pickling.cpp DELETED
@@ -1,130 +0,0 @@
- /*
-     tests/test_pickling.cpp -- pickle support
-
-     Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-     All rights reserved. Use of this source code is governed by a
-     BSD-style license that can be found in the LICENSE file.
- */
-
- #include "pybind11_tests.h"
-
- TEST_SUBMODULE(pickling, m) {
-     // test_roundtrip
-     class Pickleable {
-     public:
-         Pickleable(const std::string &value) : m_value(value) { }
-         const std::string &value() const { return m_value; }
-
-         void setExtra1(int extra1) { m_extra1 = extra1; }
-         void setExtra2(int extra2) { m_extra2 = extra2; }
-         int extra1() const { return m_extra1; }
-         int extra2() const { return m_extra2; }
-     private:
-         std::string m_value;
-         int m_extra1 = 0;
-         int m_extra2 = 0;
-     };
-
-     class PickleableNew : public Pickleable {
-     public:
-         using Pickleable::Pickleable;
-     };
-
-     py::class_<Pickleable>(m, "Pickleable")
-         .def(py::init<std::string>())
-         .def("value", &Pickleable::value)
-         .def("extra1", &Pickleable::extra1)
-         .def("extra2", &Pickleable::extra2)
-         .def("setExtra1", &Pickleable::setExtra1)
-         .def("setExtra2", &Pickleable::setExtra2)
-         // For details on the methods below, refer to
-         // http://docs.python.org/3/library/pickle.html#pickling-class-instances
-         .def("__getstate__", [](const Pickleable &p) {
-             /* Return a tuple that fully encodes the state of the object */
-             return py::make_tuple(p.value(), p.extra1(), p.extra2());
-         })
-         .def("__setstate__", [](Pickleable &p, py::tuple t) {
-             if (t.size() != 3)
-                 throw std::runtime_error("Invalid state!");
-             /* Invoke the constructor (need to use in-place version) */
-             new (&p) Pickleable(t[0].cast<std::string>());
-
-             /* Assign any additional state */
-             p.setExtra1(t[1].cast<int>());
-             p.setExtra2(t[2].cast<int>());
-         });
-
-     py::class_<PickleableNew, Pickleable>(m, "PickleableNew")
-         .def(py::init<std::string>())
-         .def(py::pickle(
-             [](const PickleableNew &p) {
-                 return py::make_tuple(p.value(), p.extra1(), p.extra2());
-             },
-             [](py::tuple t) {
-                 if (t.size() != 3)
-                     throw std::runtime_error("Invalid state!");
-                 auto p = PickleableNew(t[0].cast<std::string>());
-
-                 p.setExtra1(t[1].cast<int>());
-                 p.setExtra2(t[2].cast<int>());
-                 return p;
-             }
-         ));
-
- #if !defined(PYPY_VERSION)
-     // test_roundtrip_with_dict
-     class PickleableWithDict {
-     public:
-         PickleableWithDict(const std::string &value) : value(value) { }
-
-         std::string value;
-         int extra;
-     };
-
-     class PickleableWithDictNew : public PickleableWithDict {
-     public:
-         using PickleableWithDict::PickleableWithDict;
-     };
-
-     py::class_<PickleableWithDict>(m, "PickleableWithDict", py::dynamic_attr())
-         .def(py::init<std::string>())
-         .def_readwrite("value", &PickleableWithDict::value)
-         .def_readwrite("extra", &PickleableWithDict::extra)
-         .def("__getstate__", [](py::object self) {
-             /* Also include __dict__ in state */
-             return py::make_tuple(self.attr("value"), self.attr("extra"), self.attr("__dict__"));
-         })
-         .def("__setstate__", [](py::object self, py::tuple t) {
-             if (t.size() != 3)
-                 throw std::runtime_error("Invalid state!");
-             /* Cast and construct */
-             auto& p = self.cast<PickleableWithDict&>();
-             new (&p) PickleableWithDict(t[0].cast<std::string>());
-
-             /* Assign C++ state */
-             p.extra = t[1].cast<int>();
-
-             /* Assign Python state */
-             self.attr("__dict__") = t[2];
-         });
-
-     py::class_<PickleableWithDictNew, PickleableWithDict>(m, "PickleableWithDictNew")
-         .def(py::init<std::string>())
-         .def(py::pickle(
-             [](py::object self) {
-                 return py::make_tuple(self.attr("value"), self.attr("extra"), self.attr("__dict__"));
-             },
-             [](const py::tuple &t) {
-                 if (t.size() != 3)
-                     throw std::runtime_error("Invalid state!");
-
-                 auto cpp_state = PickleableWithDictNew(t[0].cast<std::string>());
-                 cpp_state.extra = t[1].cast<int>();
-
-                 auto py_state = t[2].cast<py::dict>();
-                 return std::make_pair(cpp_state, py_state);
-             }
-         ));
- #endif
- }
 
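On the Python side, the bindings above round-trip through the standard pickle protocol; a sketch along the lines of pybind11's own test suite (assuming the extension module is built as `pybind11_tests`):

    import pickle
    from pybind11_tests import pickling as m

    p = m.PickleableNew("test_value")
    p.setExtra1(15)
    p.setExtra2(48)

    p2 = pickle.loads(pickle.dumps(p, 2))
    assert p2.value() == "test_value"
    assert (p2.extra1(), p2.extra2()) == (15, 48)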
 
spaces/CVPR/LIVE/thrust/thrust/mismatch.h DELETED
@@ -1,260 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file mismatch.h
19
- * \brief Search for differences between ranges
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/detail/execution_policy.h>
26
- #include <thrust/pair.h>
27
-
28
- namespace thrust
29
- {
30
-
31
-
32
- /*! \addtogroup algorithms
33
- */
34
-
35
- /*! \addtogroup searching
36
- * \ingroup algorithms
37
- * \{
38
- */
39
-
40
-
41
- /*! \p mismatch finds the first position where the two ranges <tt>[first1, last1)</tt>
42
- * and <tt>[first2, first2 + (last1 - first1))</tt> differ. The two versions of
43
- * \p mismatch use different tests for whether elements differ.
44
- *
45
- * This version of \p mismatch finds the first iterator \c i in <tt>[first1, last1)</tt>
46
- * such that <tt>*i == *(first2 + (i - first1))</tt> is \c false. The return value is a
47
- * \c pair whose first element is \c i and whose second element is <tt>*(first2 + (i - first1))</tt>.
48
- * If no such iterator \c i exists, the return value is a \c pair whose first element
49
- * is \c last1 and whose second element is <tt>*(first2 + (last1 - first1))</tt>.
50
- *
51
- * The algorithm's execution is parallelized as determined by \p exec.
52
- *
53
- * \param exec The execution policy to use for parallelization.
54
- * \param first1 The beginning of the first sequence.
55
- * \param last1 The end of the first sequence.
56
- * \param first2 The beginning of the second sequence.
57
- * \return The first position where the sequences differ.
58
- *
59
- * \tparam DerivedPolicy The name of the derived execution policy.
60
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
61
- * and \p InputIterator1's \c value_type is equality comparable to \p InputIterator2's \c value_type.
62
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
63
- *
64
- * \code
65
- * #include <thrust/mismatch.h>
66
- * #include <thrust/device_vector.h>
67
- * #include <thrust/execution_policy.h>
68
- * ...
69
- * thrust::device_vector<int> vec1(4);
70
- * thrust::device_vector<int> vec2(4);
71
- *
72
- * vec1[0] = 0; vec2[0] = 0;
73
- * vec1[1] = 5; vec2[1] = 5;
74
- * vec1[2] = 3; vec2[2] = 8;
75
- * vec1[3] = 7; vec2[3] = 7;
76
- *
77
- * typedef thrust::device_vector<int>::iterator Iterator;
78
- * thrust::pair<Iterator,Iterator> result;
79
- *
80
- * result = thrust::mismatch(thrust::device, vec1.begin(), vec1.end(), vec2.begin());
81
- *
82
- * // result.first is vec1.begin() + 2
83
- * // result.second is vec2.begin() + 2
84
- * \endcode
85
- *
86
- * \see find
87
- * \see find_if
88
- */
89
- template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2>
90
- __host__ __device__
91
- thrust::pair<InputIterator1, InputIterator2> mismatch(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
92
- InputIterator1 first1,
93
- InputIterator1 last1,
94
- InputIterator2 first2);
95
-
96
-
97
- /*! \p mismatch finds the first position where the two ranges <tt>[first1, last1)</tt>
98
- * and <tt>[first2, first2 + (last1 - first1))</tt> differ. The two versions of
99
- * \p mismatch use different tests for whether elements differ.
100
- *
101
- * This version of \p mismatch finds the first iterator \c i in <tt>[first1, last1)</tt>
102
- * such that <tt>*i == *(first2 + (i - first1))</tt> is \c false. The return value is a
103
- * \c pair whose first element is \c i and whose second element is <tt>*(first2 + (i - first1))</tt>.
104
- * If no such iterator \c i exists, the return value is a \c pair whose first element
105
- * is \c last1 and whose second element is <tt>*(first2 + (last1 - first1))</tt>.
106
- *
107
- * \param first1 The beginning of the first sequence.
108
- * \param last1 The end of the first sequence.
109
- * \param first2 The beginning of the second sequence.
110
- * \return The first position where the sequences differ.
111
- *
112
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
113
- * and \p InputIterator1's \c value_type is equality comparable to \p InputIterator2's \c value_type.
114
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
115
- *
116
- * \code
117
- * #include <thrust/mismatch.h>
118
- * #include <thrust/device_vector.h>
119
- * ...
120
- * thrust::device_vector<int> vec1(4);
121
- * thrust::device_vector<int> vec2(4);
122
- *
123
- * vec1[0] = 0; vec2[0] = 0;
124
- * vec1[1] = 5; vec2[1] = 5;
125
- * vec1[2] = 3; vec2[2] = 8;
126
- * vec1[3] = 7; vec2[3] = 7;
127
- *
128
- * typedef thrust::device_vector<int>::iterator Iterator;
129
- * thrust::pair<Iterator,Iterator> result;
130
- *
131
- * result = thrust::mismatch(vec1.begin(), vec1.end(), vec2.begin());
132
- *
133
- * // result.first is vec1.begin() + 2
134
- * // result.second is vec2.begin() + 2
135
- * \endcode
136
- *
137
- * \see find
138
- * \see find_if
139
- */
140
- template <typename InputIterator1, typename InputIterator2>
141
- thrust::pair<InputIterator1, InputIterator2> mismatch(InputIterator1 first1,
142
- InputIterator1 last1,
143
- InputIterator2 first2);
144
-
145
-
146
- /*! \p mismatch finds the first position where the two ranges <tt>[first1, last1)</tt>
147
- * and <tt>[first2, first2 + (last1 - first1))</tt> differ. The two versions of
148
- * \p mismatch use different tests for whether elements differ.
149
- *
150
- * This version of \p mismatch finds the first iterator \c i in <tt>[first1, last1)</tt>
- * such that <tt>pred(\*i, \*(first2 + (i - first1)))</tt> is \c false. The return value is a
- * \c pair whose first element is \c i and whose second element is <tt>*(first2 + (i - first1))</tt>.
- * If no such iterator \c i exists, the return value is a \c pair whose first element is
- * \c last1 and whose second element is <tt>*(first2 + (last1 - first1))</tt>.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first1 The beginning of the first sequence.
- * \param last1 The end of the first sequence.
- * \param first2 The beginning of the second sequence.
- * \param pred The binary predicate to compare elements.
- * \return The first position where the sequences differ.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * \code
- * #include <thrust/mismatch.h>
- * #include <thrust/device_vector.h>
- * #include <thrust/execution_policy.h>
- * ...
- * thrust::device_vector<int> vec1(4);
- * thrust::device_vector<int> vec2(4);
- *
- * vec1[0] = 0; vec2[0] = 0;
- * vec1[1] = 5; vec2[1] = 5;
- * vec1[2] = 3; vec2[2] = 8;
- * vec1[3] = 7; vec2[3] = 7;
- *
- * typedef thrust::device_vector<int>::iterator Iterator;
- * thrust::pair<Iterator,Iterator> result;
- *
- * result = thrust::mismatch(thrust::device, vec1.begin(), vec1.end(), vec2.begin(), thrust::equal_to<int>());
- *
- * // result.first is vec1.begin() + 2
- * // result.second is vec2.begin() + 2
- * \endcode
- *
- * \see find
- * \see find_if
- */
- template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
- __host__ __device__
- thrust::pair<InputIterator1, InputIterator2> mismatch(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                                                       InputIterator1 first1,
-                                                       InputIterator1 last1,
-                                                       InputIterator2 first2,
-                                                       BinaryPredicate pred);
- 
- 
- /*! \p mismatch finds the first position where the two ranges <tt>[first1, last1)</tt>
- * and <tt>[first2, first2 + (last1 - first1))</tt> differ. The two versions of
- * \p mismatch use different tests for whether elements differ.
- *
- * This version of \p mismatch finds the first iterator \c i in <tt>[first1, last1)</tt>
- * such that <tt>pred(\*i, \*(first2 + (i - first1)))</tt> is \c false. The return value is a
- * \c pair whose first element is \c i and whose second element is <tt>*(first2 + (i - first1))</tt>.
- * If no such iterator \c i exists, the return value is a \c pair whose first element is
- * \c last1 and whose second element is <tt>*(first2 + (last1 - first1))</tt>.
- *
- * \param first1 The beginning of the first sequence.
- * \param last1 The end of the first sequence.
- * \param first2 The beginning of the second sequence.
- * \param pred The binary predicate to compare elements.
- * \return The first position where the sequences differ.
- *
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
- *
- * \code
- * #include <thrust/mismatch.h>
- * #include <thrust/device_vector.h>
- * ...
- * thrust::device_vector<int> vec1(4);
- * thrust::device_vector<int> vec2(4);
- *
- * vec1[0] = 0; vec2[0] = 0;
- * vec1[1] = 5; vec2[1] = 5;
- * vec1[2] = 3; vec2[2] = 8;
- * vec1[3] = 7; vec2[3] = 7;
- *
- * typedef thrust::device_vector<int>::iterator Iterator;
- * thrust::pair<Iterator,Iterator> result;
- *
- * result = thrust::mismatch(vec1.begin(), vec1.end(), vec2.begin(), thrust::equal_to<int>());
- *
- * // result.first is vec1.begin() + 2
- * // result.second is vec2.begin() + 2
- * \endcode
- *
- * \see find
- * \see find_if
- */
- template <typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
- thrust::pair<InputIterator1, InputIterator2> mismatch(InputIterator1 first1,
-                                                       InputIterator1 last1,
-                                                       InputIterator2 first2,
-                                                       BinaryPredicate pred);
- 
- /*! \} // end searching
- */
- 
- } // end namespace thrust
- 
- #include <thrust/detail/mismatch.inl>
- 
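
The Doxygen examples above already show the call itself; as a small supplement, here is a hedged sketch of recovering the index of the first mismatch from the returned pair via iterator subtraction. The vector contents are illustrative, not taken from the header:

    #include <thrust/mismatch.h>
    #include <thrust/host_vector.h>
    #include <thrust/functional.h>
    #include <cstdio>

    int main()
    {
        thrust::host_vector<int> a(4), b(4);
        a[0] = 0; b[0] = 0;
        a[1] = 5; b[1] = 5;
        a[2] = 3; b[2] = 8;   // first difference is at index 2
        a[3] = 7; b[3] = 7;

        // Without an execution policy argument, dispatch is inferred from the
        // iterators' system tags (here: the host system).
        thrust::pair<thrust::host_vector<int>::iterator,
                     thrust::host_vector<int>::iterator> result =
            thrust::mismatch(a.begin(), a.end(), b.begin(), thrust::equal_to<int>());

        // result.first points into [a.begin(), a.end()), so plain iterator
        // subtraction recovers the position of the first mismatch.
        std::printf("first mismatch at index %td\n", result.first - a.begin());
        return 0;
    }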
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/uninitialized_fill.h DELETED
@@ -1,44 +0,0 @@
- /*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
- #pragma once
- 
- #include <thrust/detail/config.h>
- 
- // the purpose of this header is to #include the uninitialized_fill.h header
- // of the sequential, host, and device systems. It should be #included in any
- // code which uses adl to dispatch uninitialized_fill
- 
- #include <thrust/system/detail/sequential/uninitialized_fill.h>
- 
- // SCons can't see through the #defines below to figure out what this header
- // includes, so we fake it out by specifying all possible files we might end up
- // including inside an #if 0.
- #if 0
- #include <thrust/system/cpp/detail/uninitialized_fill.h>
- #include <thrust/system/cuda/detail/uninitialized_fill.h>
- #include <thrust/system/omp/detail/uninitialized_fill.h>
- #include <thrust/system/tbb/detail/uninitialized_fill.h>
- #endif
- 
- #define __THRUST_HOST_SYSTEM_UNINITIALIZED_FILL_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/uninitialized_fill.h>
- #include __THRUST_HOST_SYSTEM_UNINITIALIZED_FILL_HEADER
- #undef __THRUST_HOST_SYSTEM_UNINITIALIZED_FILL_HEADER
- 
- #define __THRUST_DEVICE_SYSTEM_UNINITIALIZED_FILL_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/uninitialized_fill.h>
- #include __THRUST_DEVICE_SYSTEM_UNINITIALIZED_FILL_HEADER
- #undef __THRUST_DEVICE_SYSTEM_UNINITIALIZED_FILL_HEADER
- 
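
The #define/#include/#undef sequence above is how Thrust points one forwarding header at whichever host/device backend was selected at configuration time (__THRUST_HOST_SYSTEM_ROOT and __THRUST_DEVICE_SYSTEM_ROOT are defined by Thrust's config headers). A stripped-down sketch of the same preprocessor pattern, using hypothetical names rather than Thrust's:

    /* dispatch_fill.h -- forwards to the configured backend's fill.h.
     * MYLIB_BACKEND_ROOT is a placeholder the build system would define,
     * e.g. -DMYLIB_BACKEND_ROOT=mylib/backends/openmp
     */
    #pragma once

    /* Build the header name as a preprocessing-token sequence, include it,
     * then #undef the helper so it cannot leak into other headers. The inner
     * MYLIB_BACKEND_ROOT macro is expanded while the #include line is
     * processed, which is what makes the indirection work. */
    #define MYLIB_FILL_HEADER <MYLIB_BACKEND_ROOT/fill.h>
    #include MYLIB_FILL_HEADER
    #undef MYLIB_FILL_HEADER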
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/reduce.h DELETED
@@ -1,54 +0,0 @@
- /*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
- 
- /*! \file reduce.h
- * \brief TBB implementation of reduce.
- */
- 
- #pragma once
- 
- #include <thrust/detail/config.h>
- #include <thrust/system/tbb/detail/execution_policy.h>
- 
- namespace thrust
- {
- namespace system
- {
- namespace tbb
- {
- namespace detail
- {
- 
- 
- template<typename DerivedPolicy,
-          typename InputIterator,
-          typename OutputType,
-          typename BinaryFunction>
- OutputType reduce(execution_policy<DerivedPolicy> &exec,
-                   InputIterator begin,
-                   InputIterator end,
-                   OutputType init,
-                   BinaryFunction binary_op);
- 
- 
- } // end namespace detail
- } // end namespace tbb
- } // end namespace system
- } // end namespace thrust
- 
- #include <thrust/system/tbb/detail/reduce.inl>
- 
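
This detail header only declares the TBB backend's reduce; user code normally reaches it through the public thrust::reduce entry point with a TBB execution policy. A minimal, hedged usage sketch (it assumes a Thrust build where the TBB backend and thrust::tbb::par are available):

    #include <thrust/reduce.h>
    #include <thrust/functional.h>
    #include <thrust/system/tbb/execution_policy.h>
    #include <vector>
    #include <cstdio>

    int main()
    {
        std::vector<int> v(1000, 1);

        // thrust::tbb::par routes the call into the TBB backend, which is
        // where the detail::reduce declared above ultimately runs.
        int sum = thrust::reduce(thrust::tbb::par, v.begin(), v.end(),
                                 0, thrust::plus<int>());

        std::printf("sum = %d\n", sum);   // expected: 1000
        return 0;
    }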
 
spaces/CikeyQI/Yunzai/Yunzai/lib/modules/oicq/index.js DELETED
@@ -1,67 +0,0 @@
- import fs from "node:fs"
- import path from "node:path"
- 
- function toSegment(type, data) {
-   for (const i in data) {
-     switch (typeof data[i]) {
-       case "string":
-         if ((i == "file" || data[i].match(/^file:\/\//)) && fs.existsSync(data[i].replace(/^file:\/\//, ""))) {
-           if (i == "file" && !data.name)
-             data.name = path.basename(data[i])
-           data[i] = `base64://${fs.readFileSync(data[i].replace(/^file:\/\//, "")).toString("base64")}`
-         }
-         break
-       case "object":
-         if (Buffer.isBuffer(data[i]))
-           data[i] = `base64://${data[i].toString("base64")}`
-     }
-   }
-   return { type, ...data }
- }
- 
- const segment = new class segment {
-   custom(type, data) {
-     return toSegment(type, data)
-   }
-   image(file, name) {
-     return toSegment("image", { file, name })
-   }
-   at(qq, name) {
-     return toSegment("at", { qq, name })
-   }
-   record(file, name) {
-     return toSegment("record", { file, name })
-   }
-   video(file, name) {
-     return toSegment("video", { file, name })
-   }
-   file(file, name) {
-     return toSegment("file", { file, name })
-   }
-   reply(id, text, qq, time, seq) {
-     return toSegment("reply", { id, text, qq, time, seq })
-   }
-   face(id) {
-     return toSegment("face", { id })
-   }
-   share(url, title, content, image) {
-     return toSegment("share", { url, title, content, image })
-   }
-   music(type, id, url, audio, title) {
-     return toSegment("music", { type, id, url, audio, title })
-   }
-   poke(qq) {
-     return toSegment("poke", { qq })
-   }
-   gift(qq, id) {
-     return toSegment("gift", { qq, id })
-   }
-   cardimage(file, name, minwidth, minheight, maxwidth, maxheight, source, icon) {
-     return toSegment("cardimage", { file, name, minwidth, minheight, maxwidth, maxheight, source, icon })
-   }
-   tts(text) {
-     return toSegment("tts", { text })
-   }
- }
- 
- export { segment }
 
spaces/CofAI/chat/client/js/icons.js DELETED
@@ -1 +0,0 @@
- window.FontAwesomeKitConfig={asyncLoading:{enabled:!1},autoA11y:{enabled:!0},baseUrl:"https://ka-f.fontawesome.com",baseUrlKit:"https://kit-pro.fontawesome.com",detectConflictsUntil:null,iconUploads:{},id:96462084,license:"pro",method:"css",minify:{enabled:!0},token:"d0514f1901",v4FontFaceShim:{enabled:!0},v4shim:{enabled:!0},v5FontFaceShim:{enabled:!0},version:"6.1.1"},function(t){"function"==typeof define&&define.amd?define("kit-loader",t):t()}(function(){"use strict";function t(e){return(t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(e)}function e(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function n(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);e&&(o=o.filter(function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable})),n.push.apply(n,o)}return n}function o(t){for(var o=1;o<arguments.length;o++){var r=null!=arguments[o]?arguments[o]:{};o%2?n(Object(r),!0).forEach(function(n){e(t,n,r[n])}):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):n(Object(r)).forEach(function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(r,e))})}return t}function r(t,e){return function(t){if(Array.isArray(t))return t}(t)||function(t,e){if("undefined"!=typeof Symbol&&Symbol.iterator in Object(t)){var n=[],o=!0,r=!1,i=void 0;try{for(var c,a=t[Symbol.iterator]();!(o=(c=a.next()).done)&&(n.push(c.value),!e||n.length!==e);o=!0);}catch(t){r=!0,i=t}finally{try{o||null==a.return||a.return()}finally{if(r)throw i}}return n}}(t,e)||function(t,e){if(t){if("string"==typeof t)return i(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);return"Object"===n&&t.constructor&&(n=t.constructor.name),"Map"===n||"Set"===n?Array.from(t):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?i(t,e):void 0}}(t,e)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function i(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,o=new Array(e);n<e;n++)o[n]=t[n];return o}function c(t,e){var n=e&&e.addOn||"",o=e&&e.baseFilename||t.license+n,r=e&&e.minify?".min":"",i=e&&e.fileSuffix||t.method,c=e&&e.subdir||t.method;return t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/"+c+"/"+o+r+"."+i}function a(t,e){var n=e||["fa"],o="."+Array.prototype.join.call(n,",."),r=t.querySelectorAll(o);Array.prototype.forEach.call(r,function(e){var n=e.getAttribute("title");e.setAttribute("aria-hidden","true");var o=!e.nextElementSibling||!e.nextElementSibling.classList.contains("sr-only");if(n&&o){var r=t.createElement("span");r.innerHTML=n,r.classList.add("sr-only"),e.parentNode.insertBefore(r,e.nextSibling)}})}var u,f=function(){},s="undefined"!=typeof global&&void 0!==global.process&&"function"==typeof global.process.emit,d="undefined"==typeof setImmediate?setTimeout:setImmediate,l=[];function h(){for(var t=0;t<l.length;t++)l[t][0](l[t][1]);l=[],u=!1}function m(t,e){l.push([t,e]),u||(u=!0,d(h,0))}function p(t){var e=t.owner,n=e._state,o=e._data,r=t[n],i=t.then;if("function"==typeof r){n="fulfilled";try{o=r(o)}catch(t){g(i,t)}}v(i,o)||("fulfilled"===n&&b(i,o),"rejected"===n&&g(i,o))}function v(e,n){var o;try{if(e===n)throw new TypeError("A promises 
callback cannot return that same promise.");if(n&&("function"==typeof n||"object"===t(n))){var r=n.then;if("function"==typeof r)return r.call(n,function(t){o||(o=!0,n===t?y(e,t):b(e,t))},function(t){o||(o=!0,g(e,t))}),!0}}catch(t){return o||g(e,t),!0}return!1}function b(t,e){t!==e&&v(t,e)||y(t,e)}function y(t,e){"pending"===t._state&&(t._state="settled",t._data=e,m(A,t))}function g(t,e){"pending"===t._state&&(t._state="settled",t._data=e,m(S,t))}function w(t){t._then=t._then.forEach(p)}function A(t){t._state="fulfilled",w(t)}function S(t){t._state="rejected",w(t),!t._handled&&s&&global.process.emit("unhandledRejection",t._data,t)}function O(t){global.process.emit("rejectionHandled",t)}function j(t){if("function"!=typeof t)throw new TypeError("Promise resolver "+t+" is not a function");if(this instanceof j==0)throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");this._then=[],function(t,e){function n(t){g(e,t)}try{t(function(t){b(e,t)},n)}catch(t){n(t)}}(t,this)}j.prototype={constructor:j,_state:"pending",_then:null,_data:void 0,_handled:!1,then:function(t,e){var n={owner:this,then:new this.constructor(f),fulfilled:t,rejected:e};return!e&&!t||this._handled||(this._handled=!0,"rejected"===this._state&&s&&m(O,this)),"fulfilled"===this._state||"rejected"===this._state?m(p,n):this._then.push(n),n.then},catch:function(t){return this.then(null,t)}},j.all=function(t){if(!Array.isArray(t))throw new TypeError("You must pass an array to Promise.all().");return new j(function(e,n){var o=[],r=0;function i(t){return r++,function(n){o[t]=n,--r||e(o)}}for(var c,a=0;a<t.length;a++)(c=t[a])&&"function"==typeof c.then?c.then(i(a),n):o[a]=c;r||e(o)})},j.race=function(t){if(!Array.isArray(t))throw new TypeError("You must pass an array to Promise.race().");return new j(function(e,n){for(var o,r=0;r<t.length;r++)(o=t[r])&&"function"==typeof o.then?o.then(e,n):e(o)})},j.resolve=function(e){return e&&"object"===t(e)&&e.constructor===j?e:new j(function(t){t(e)})},j.reject=function(t){return new j(function(e,n){n(t)})};var F="function"==typeof Promise?Promise:j;function E(t,e){var n=e.fetch,o=e.XMLHttpRequest,r=e.token,i=t;return"URLSearchParams"in window?(i=new URL(t)).searchParams.set("token",r):i=i+"?token="+encodeURIComponent(r),i=i.toString(),new F(function(t,e){if("function"==typeof n)n(i,{mode:"cors",cache:"default"}).then(function(t){if(t.ok)return t.text();throw new Error("")}).then(function(e){t(e)}).catch(e);else if("function"==typeof o){var r=new o;r.addEventListener("loadend",function(){this.responseText?t(this.responseText):e(new Error(""))}),["abort","error","timeout"].map(function(t){r.addEventListener(t,function(){e(new Error(""))})}),r.open("GET",i),r.send()}else e(new Error(""))})}function _(t,e,n){var o=t;return[[/(url\("?)\.\.\/\.\.\/\.\./g,function(t,n){return"".concat(n).concat(e)}],[/(url\("?)\.\.\/webfonts/g,function(t,o){return"".concat(o).concat(e,"/releases/v").concat(n,"/webfonts")}],[/(url\("?)https:\/\/kit-free([^.])*\.fontawesome\.com/g,function(t,n){return"".concat(n).concat(e)}]].forEach(function(t){var e=r(t,2),n=e[0],i=e[1];o=o.replace(n,i)}),o}function C(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){},r=e.document||r,i=a.bind(a,r,["fa","fab","fas","far","fal","fad","fak"]),u=Object.keys(t.iconUploads||{}).length>0;t.autoA11y.enabled&&n(i);var f=[{id:"fa-main",addOn:void 
0}];t.v4shim&&t.v4shim.enabled&&f.push({id:"fa-v4-shims",addOn:"-v4-shims"}),t.v5FontFaceShim&&t.v5FontFaceShim.enabled&&f.push({id:"fa-v5-font-face",addOn:"-v5-font-face"}),t.v4FontFaceShim&&t.v4FontFaceShim.enabled&&f.push({id:"fa-v4-font-face",addOn:"-v4-font-face"}),u&&f.push({id:"fa-kit-upload",customCss:!0});var s=f.map(function(n){return new F(function(r,i){E(n.customCss?function(t){return t.baseUrlKit+"/"+t.token+"/"+t.id+"/kit-upload.css"}(t):c(t,{addOn:n.addOn,minify:t.minify.enabled}),e).then(function(i){r(function(t,e){var n=e.contentFilter||function(t,e){return t},o=document.createElement("style"),r=document.createTextNode(n(t,e));return o.appendChild(r),o.media="all",e.id&&o.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&o.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),o}(i,o(o({},e),{},{baseUrl:t.baseUrl,version:t.version,id:n.id,contentFilter:function(t,e){return _(t,e.baseUrl,e.version)}})))}).catch(i)})});return F.all(s)}function P(t,e){var n=document.createElement("SCRIPT"),o=document.createTextNode(t);return n.appendChild(o),n.referrerPolicy="strict-origin",e.id&&n.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n}function U(t){var e,n=[],o=document,r=(o.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(o.readyState);r||o.addEventListener("DOMContentLoaded",e=function(){for(o.removeEventListener("DOMContentLoaded",e),r=1;e=n.shift();)e()}),r?setTimeout(t,0):n.push(t)}try{if(window.FontAwesomeKitConfig){var k=window.FontAwesomeKitConfig,L={detectingConflicts:k.detectConflictsUntil&&new Date<=new Date(k.detectConflictsUntil),detectionIgnoreAttr:"data-fa-detection-ignore",fetch:window.fetch,token:k.token,XMLHttpRequest:window.XMLHttpRequest,document:document},I=document.currentScript,T=I?I.parentElement:document.head;(function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return"js"===t.method?function(t,e){e.autoA11y=t.autoA11y.enabled,"pro"===t.license&&(e.autoFetchSvg=!0,e.fetchSvgFrom=t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/svgs",e.fetchUploadedSvgFrom=t.uploadsUrl);var n=[];return t.v4shim.enabled&&n.push(new F(function(n,r){E(c(t,{addOn:"-v4-shims",minify:t.minify.enabled}),e).then(function(t){n(P(t,o(o({},e),{},{id:"fa-v4-shims"})))}).catch(r)})),n.push(new F(function(n,r){E(c(t,{minify:t.minify.enabled}),e).then(function(t){var r=P(t,o(o({},e),{},{id:"fa-main"}));n(function(t,e){var n=e&&void 0!==e.autoFetchSvg?e.autoFetchSvg:void 0,o=e&&void 0!==e.autoA11y?e.autoA11y:void 0;return void 0!==o&&t.setAttribute("data-auto-a11y",o?"true":"false"),n&&(t.setAttributeNode(document.createAttribute("data-auto-fetch-svg")),t.setAttribute("data-fetch-svg-from",e.fetchSvgFrom),t.setAttribute("data-fetch-uploaded-svg-from",e.fetchUploadedSvgFrom)),t}(r,e))}).catch(r)})),F.all(n)}(t,e):"css"===t.method?C(t,e,function(t){U(t),function(t){"undefined"!=typeof MutationObserver&&new MutationObserver(t).observe(document,{childList:!0,subtree:!0})}(t)}):void 0})(k,L).then(function(t){t.map(function(t){try{T.insertBefore(t,I?I.nextSibling:null)}catch(e){T.appendChild(t)}}),L.detectingConflicts&&I&&U(function(){I.setAttributeNode(document.createAttribute(L.detectionIgnoreAttr));var t=function(t,e){var n=document.createElement("script");return 
e&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n.src=c(t,{baseFilename:"conflict-detection",fileSuffix:"js",subdir:"js",minify:t.minify.enabled}),n}(k,L);document.body.appendChild(t)})}).catch(function(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))})}}catch(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))}});
 
 
spaces/Cpp4App/Cpp4App/CDM/result_processing/view_gt.py DELETED
@@ -1,89 +0,0 @@
- from tqdm import tqdm
- import json
- import cv2
- from os.path import join as pjoin
- 
- from config.CONFIG_UIED import Config
- C = Config()
- 
- 
- def draw_bounding_box_class(org, components, color=C.COLOR, line=2, show=False, write_path=None):
-     """
-     Draw bounding boxes of components with their classes on the original image
-     :param org: original image
-     :param components: bbox [(column_min, row_min, column_max, row_max)]
-                        -> top_left: (column_min, row_min)
-                        -> bottom_right: (column_max, row_max)
-     :param color: colors mapping to the different component classes
-     :param line: line thickness
-     :param show: show the labeled image or not
-     :param write_path: if given, path to write the labeled image to
-     :return: labeled image
-     """
-     board = org.copy()
-     bboxes = components['bboxes']
-     categories = components['categories']
-     for i in range(len(bboxes)):
-         bbox = bboxes[i]
-         category = categories[i]
-         board = cv2.rectangle(board, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color[C.CLASS_MAP[str(category)]], line)
-         board = cv2.putText(board, C.CLASS_MAP[str(category)], (bbox[0] + 5, bbox[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color[C.CLASS_MAP[str(category)]], 2)
-     if show:
-         cv2.imshow('a', cv2.resize(board, (500, 1000)))
-         cv2.waitKey(0)
-     if write_path is not None:
-         cv2.imwrite(write_path, board)
-     return board
- 
- 
- def load_ground_truth_json(gt_file, no_text=True):
-     def get_img_by_id(img_id):
-         for image in images:
-             if image['id'] == img_id:
-                 return image['file_name'].split('/')[-1][:-4], (image['height'], image['width'])
- 
-     def cvt_bbox(bbox):
-         '''
-         :param bbox: [x, y, width, height]
-         :return: [col_min, row_min, col_max, row_max]
-         '''
-         bbox = [int(b) for b in bbox]
-         return [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]
- 
-     data = json.load(open(gt_file, 'r'))
-     images = data['images']
-     annots = data['annotations']
-     compos = {}
-     print('Loading %d ground truth annotations' % len(annots))
-     for annot in tqdm(annots):
-         img_name, size = get_img_by_id(annot['image_id'])
-         if no_text and int(annot['category_id']) == 14:
-             # skip text annotations without clobbering components already collected for this image
-             compos.setdefault(img_name, {'bboxes': [], 'categories': [], 'size': size})
-             continue
-         if img_name not in compos:
-             compos[img_name] = {'bboxes': [cvt_bbox(annot['bbox'])], 'categories': [annot['category_id']], 'size': size}
-         else:
-             compos[img_name]['bboxes'].append(cvt_bbox(annot['bbox']))
-             compos[img_name]['categories'].append(annot['category_id'])
-     return compos
- 
- 
- def view_gt_all(gt, img_root):
-     for img_id in gt:
-         compos = gt[img_id]
-         img = cv2.imread(pjoin(img_root, img_id + '.jpg'))
-         print(pjoin(img_root, img_id + '.jpg'))
-         draw_bounding_box_class(img, compos, show=True)
- 
- 
- def view_gt_single(gt, img_root, img_id):
-     img_id = str(img_id)
-     compos = gt[img_id]
-     img = cv2.imread(pjoin(img_root, img_id + '.jpg'))
-     print(pjoin(img_root, img_id + '.jpg'))
-     draw_bounding_box_class(img, compos, show=True)
- 
- 
- gt = load_ground_truth_json('E:\\Mulong\\Datasets\\rico\\instances_test.json', no_text=False)
- # view_gt_all(gt, 'E:\\Mulong\\Datasets\\rico\\combined')
- view_gt_single(gt, 'E:\\Mulong\\Datasets\\rico\\combined', 670)
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/_utils.py DELETED
@@ -1,39 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- import glob
- import os.path
- 
- import torch
- 
- try:
-     from torch.utils.cpp_extension import load as load_ext
-     from torch.utils.cpp_extension import CUDA_HOME
- except ImportError:
-     raise ImportError("The cpp layer extensions require PyTorch 0.4 or higher")
- 
- 
- def _load_C_extensions():
-     this_dir = os.path.dirname(os.path.abspath(__file__))
-     this_dir = os.path.dirname(this_dir)
-     this_dir = os.path.join(this_dir, "csrc")
- 
-     main_file = glob.glob(os.path.join(this_dir, "*.cpp"))
-     source_cpu = glob.glob(os.path.join(this_dir, "cpu", "*.cpp"))
-     source_cuda = glob.glob(os.path.join(this_dir, "cuda", "*.cu"))
- 
-     source = main_file + source_cpu
- 
-     extra_cflags = []
-     if torch.cuda.is_available() and CUDA_HOME is not None:
-         source.extend(source_cuda)
-         extra_cflags = ["-DWITH_CUDA"]
-     source = [os.path.join(this_dir, s) for s in source]
-     extra_include_paths = [this_dir]
-     return load_ext(
-         "torchvision",
-         source,
-         extra_cflags=extra_cflags,
-         extra_include_paths=extra_include_paths,
-     )
- 
- 
- _C = _load_C_extensions()
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/bar_plot.py DELETED
@@ -1,377 +0,0 @@
- """gr.BarPlot() component."""
- 
- from __future__ import annotations
- 
- from typing import Callable, Literal
- 
- import altair as alt
- import pandas as pd
- from gradio_client.documentation import document, set_documentation_group
- 
- from gradio.components.base import _Keywords
- from gradio.components.plot import AltairPlot, Plot
- 
- set_documentation_group("component")
- 
- 
- @document()
- class BarPlot(Plot):
-     """
-     Create a bar plot.
- 
-     Preprocessing: this component does *not* accept input.
-     Postprocessing: expects a pandas dataframe with the data to plot.
- 
-     Demos: bar_plot, chicago-bikeshare-dashboard
-     """
- 
-     def __init__(
-         self,
-         value: pd.DataFrame | Callable | None = None,
-         x: str | None = None,
-         y: str | None = None,
-         *,
-         color: str | None = None,
-         vertical: bool = True,
-         group: str | None = None,
-         title: str | None = None,
-         tooltip: list[str] | str | None = None,
-         x_title: str | None = None,
-         y_title: str | None = None,
-         color_legend_title: str | None = None,
-         group_title: str | None = None,
-         color_legend_position: Literal[
-             "left",
-             "right",
-             "top",
-             "bottom",
-             "top-left",
-             "top-right",
-             "bottom-left",
-             "bottom-right",
-             "none",
-         ]
-         | None = None,
-         height: int | None = None,
-         width: int | None = None,
-         y_lim: list[int] | None = None,
-         caption: str | None = None,
-         interactive: bool | None = True,
-         label: str | None = None,
-         show_label: bool | None = None,
-         container: bool = True,
-         scale: int | None = None,
-         min_width: int = 160,
-         every: float | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         elem_classes: list[str] | str | None = None,
-     ):
-         """
-         Parameters:
-             value: The pandas dataframe containing the data to display in a bar plot.
-             x: Column corresponding to the x axis.
-             y: Column corresponding to the y axis.
-             color: The column to determine the bar color. Must be categorical (discrete values).
-             vertical: If True, the bars will be displayed vertically. If False, the x and y axis will be switched, displaying the bars horizontally. Default is True.
-             group: The column with which to split the overall plot into smaller subplots.
-             title: The title to display on top of the chart.
-             tooltip: The column (or list of columns) to display on the tooltip when a user hovers over a bar.
-             x_title: The title given to the x axis. By default, uses the value of the x parameter.
-             y_title: The title given to the y axis. By default, uses the value of the y parameter.
-             color_legend_title: The title given to the color legend. By default, uses the value of the color parameter.
-             group_title: The label displayed on top of the subplot columns (or rows if vertical=False). Use an empty string to omit.
-             color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
-             height: The height of the plot in pixels.
-             width: The width of the plot in pixels.
-             y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max].
-             caption: The (optional) caption to display below the plot.
-             interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
-             label: The (optional) label to display on the top left corner of the plot.
-             show_label: Whether the label should be displayed.
-             every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
-             visible: Whether the plot should be visible.
-             elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
-             elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
-         """
-         self.x = x
-         self.y = y
-         self.color = color
-         self.vertical = vertical
-         self.group = group
-         self.group_title = group_title
-         self.tooltip = tooltip
-         self.title = title
-         self.x_title = x_title
-         self.y_title = y_title
-         self.color_legend_title = color_legend_title
-         self.color_legend_position = color_legend_position
-         self.y_lim = y_lim
-         self.caption = caption
-         self.interactive_chart = interactive
-         self.width = width
-         self.height = height
-         super().__init__(
-             value=value,
-             label=label,
-             show_label=show_label,
-             container=container,
-             scale=scale,
-             min_width=min_width,
-             visible=visible,
-             elem_id=elem_id,
-             elem_classes=elem_classes,
-             every=every,
-         )
- 
-     def get_config(self):
-         config = super().get_config()
-         config["caption"] = self.caption
-         return config
- 
-     def get_block_name(self) -> str:
-         return "plot"
- 
-     @staticmethod
-     def update(
-         value: pd.DataFrame | dict | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE,
-         x: str | None = None,
-         y: str | None = None,
-         color: str | None = None,
-         vertical: bool = True,
-         group: str | None = None,
-         title: str | None = None,
-         tooltip: list[str] | str | None = None,
-         x_title: str | None = None,
-         y_title: str | None = None,
-         color_legend_title: str | None = None,
-         group_title: str | None = None,
-         color_legend_position: Literal[
-             "left",
-             "right",
-             "top",
-             "bottom",
-             "top-left",
-             "top-right",
-             "bottom-left",
-             "bottom-right",
-             "none",
-         ]
-         | None = None,
-         height: int | None = None,
-         width: int | None = None,
-         y_lim: list[int] | None = None,
-         caption: str | None = None,
-         interactive: bool | None = None,
-         label: str | None = None,
-         show_label: bool | None = None,
-         container: bool | None = None,
-         scale: int | None = None,
-         min_width: int | None = None,
-         visible: bool | None = None,
-     ):
-         """Update an existing BarPlot component.
- 
-         If updating any of the plot properties (color, size, etc.), the value, x, and y parameters must be specified.
- 
-         Parameters:
-             value: The pandas dataframe containing the data to display in a bar plot.
-             x: Column corresponding to the x axis.
-             y: Column corresponding to the y axis.
-             color: The column to determine the bar color. Must be categorical (discrete values).
-             vertical: If True, the bars will be displayed vertically. If False, the x and y axis will be switched, displaying the bars horizontally. Default is True.
-             group: The column with which to split the overall plot into smaller subplots.
-             title: The title to display on top of the chart.
-             tooltip: The column (or list of columns) to display on the tooltip when a user hovers over a bar.
-             x_title: The title given to the x axis. By default, uses the value of the x parameter.
-             y_title: The title given to the y axis. By default, uses the value of the y parameter.
-             color_legend_title: The title given to the color legend. By default, uses the value of the color parameter.
-             group_title: The label displayed on top of the subplot columns (or rows if vertical=False). Use an empty string to omit.
-             color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
-             height: The height of the plot in pixels.
-             width: The width of the plot in pixels.
-             y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max].
-             caption: The (optional) caption to display below the plot.
-             interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
-             label: The (optional) label to display on the top left corner of the plot.
-             show_label: Whether the label should be displayed.
-             visible: Whether the plot should be visible.
-         """
-         properties = [
-             x,
-             y,
-             color,
-             vertical,
-             group,
-             title,
-             tooltip,
-             x_title,
-             y_title,
-             color_legend_title,
-             group_title,
-             color_legend_position,
-             height,
-             width,
-             y_lim,
-             interactive,
-         ]
-         if any(properties):
-             if not isinstance(value, pd.DataFrame):
-                 raise ValueError(
-                     "In order to update plot properties the value parameter "
-                     "must be provided, and it must be a DataFrame. Please pass a value "
-                     "parameter to gr.BarPlot.update."
-                 )
-             if x is None or y is None:
-                 raise ValueError(
-                     "In order to update plot properties, the x and y axis data "
-                     "must be specified. Please pass valid values for x and y to "
-                     "gr.BarPlot.update."
-                 )
-             chart = BarPlot.create_plot(value, *properties)
-             value = {"type": "altair", "plot": chart.to_json(), "chart": "bar"}
- 
-         updated_config = {
-             "label": label,
-             "show_label": show_label,
-             "container": container,
-             "scale": scale,
-             "min_width": min_width,
-             "visible": visible,
-             "value": value,
-             "caption": caption,
-             "__type__": "update",
-         }
-         return updated_config
- 
-     @staticmethod
-     def create_plot(
-         value: pd.DataFrame,
-         x: str,
-         y: str,
-         color: str | None = None,
-         vertical: bool = True,
-         group: str | None = None,
-         title: str | None = None,
-         tooltip: list[str] | str | None = None,
-         x_title: str | None = None,
-         y_title: str | None = None,
-         color_legend_title: str | None = None,
-         group_title: str | None = None,
-         color_legend_position: Literal[
-             "left",
-             "right",
-             "top",
-             "bottom",
-             "top-left",
-             "top-right",
-             "bottom-left",
-             "bottom-right",
-             "none",
-         ]
-         | None = None,
-         height: int | None = None,
-         width: int | None = None,
-         y_lim: list[int] | None = None,
-         interactive: bool | None = True,
-     ):
-         """Helper for creating the bar plot."""
-         interactive = True if interactive is None else interactive
-         orientation = (
-             {"field": group, "title": group_title if group_title is not None else group}
-             if group
-             else {}
-         )
- 
-         x_title = x_title or x
-         y_title = y_title or y
- 
-         # If horizontal, switch x and y
-         if not vertical:
-             y, x = x, y
-             x = f"sum({x}):Q"
-             y_title, x_title = x_title, y_title
-             orientation = {"row": alt.Row(**orientation)} if orientation else {}  # type: ignore
-             x_lim = y_lim
-             y_lim = None
-         else:
-             y = f"sum({y}):Q"
-             x_lim = None
-             orientation = {"column": alt.Column(**orientation)} if orientation else {}  # type: ignore
- 
-         encodings = dict(
-             x=alt.X(
-                 x,  # type: ignore
-                 title=x_title,  # type: ignore
-                 scale=AltairPlot.create_scale(x_lim),  # type: ignore
-             ),
-             y=alt.Y(
-                 y,  # type: ignore
-                 title=y_title,  # type: ignore
-                 scale=AltairPlot.create_scale(y_lim),  # type: ignore
-             ),
-             **orientation,
-         )
-         properties = {}
-         if title:
-             properties["title"] = title
-         if height:
-             properties["height"] = height
-         if width:
-             properties["width"] = width
- 
-         if color:
-             domain = value[color].unique().tolist()
-             range_ = list(range(len(domain)))
-             encodings["color"] = {
-                 "field": color,
-                 "type": "nominal",
-                 "scale": {"domain": domain, "range": range_},
-                 "legend": AltairPlot.create_legend(
-                     position=color_legend_position, title=color_legend_title or color
-                 ),
-             }
- 
-         if tooltip:
-             encodings["tooltip"] = tooltip
- 
-         chart = (
-             alt.Chart(value)  # type: ignore
-             .mark_bar()  # type: ignore
-             .encode(**encodings)
-             .properties(background="transparent", **properties)
-         )
-         if interactive:
-             chart = chart.interactive()
- 
-         return chart
- 
-     def postprocess(self, y: pd.DataFrame | dict | None) -> dict[str, str] | None:
-         # if None or update
-         if y is None or isinstance(y, dict):
-             return y
-         if self.x is None or self.y is None:
-             raise ValueError("No value provided for required parameters `x` and `y`.")
-         chart = self.create_plot(
-             value=y,
-             x=self.x,
-             y=self.y,
-             color=self.color,
-             vertical=self.vertical,
-             group=self.group,
-             title=self.title,
-             tooltip=self.tooltip,
-             x_title=self.x_title,
-             y_title=self.y_title,
-             color_legend_title=self.color_legend_title,
-             color_legend_position=self.color_legend_position,
-             group_title=self.group_title,
-             y_lim=self.y_lim,
-             interactive=self.interactive_chart,
-             height=self.height,
-             width=self.width,
-         )
- 
-         return {"type": "altair", "plot": chart.to_json(), "chart": "bar"}
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-f8a15c0a.js DELETED
@@ -1,2 +0,0 @@
- import{E as W,C as Y,L as d}from"./index-ae57ca19.js";import{s as n,t as r,L as R,i as Z,d as a,f as X,a as y,b as f}from"./index-f90e1963.js";import"./index-3370be2a.js";import"./Blocks-f0129fcd.js";import"./Button-89624748.js";import"./BlockLabel-56db415e.js";import"./Empty-585389a4.js";import"./Copy-6cd42558.js";import"./Download-fdaaf5d4.js";const l=1,w=189,S=190,b=191,T=192,U=193,m=194,V=22,g=23,h=47,G=48,c=53,u=54,_=55,j=57,E=58,k=59,z=60,v=61,H=63,N=230,A=71,F=255,K=121,C=142,D=143,M=146,s=10,i=13,t=32,o=9,q=35,L=40,B=46,J=new Set([g,h,G,F,H,K,u,_,N,z,v,E,k,A,C,D,M]),OO=new W((O,$)=>{if(O.next<0)O.acceptToken(m);else if(!(O.next!=s&&O.next!=i))if($.context.depth<0)O.acceptToken(T,1);else{O.advance();let Q=0;for(;O.next==t||O.next==o;)O.advance(),Q++;let P=O.next==s||O.next==i||O.next==q;O.acceptToken(P?U:b,-Q)}},{contextual:!0,fallback:!0}),$O=new W((O,$)=>{let Q=$.context.depth;if(Q<0)return;let P=O.peek(-1);if((P==s||P==i)&&$.context.depth>=0){let e=0,x=0;for(;;){if(O.next==t)e++;else if(O.next==o)e+=8-e%8;else break;O.advance(),x++}e!=Q&&O.next!=s&&O.next!=i&&O.next!=q&&(e<Q?O.acceptToken(S,-x):O.acceptToken(w))}});function p(O,$){this.parent=O,this.depth=$,this.hash=(O?O.hash+O.hash<<8:0)+$+($<<4)}const rO=new p(null,0);function QO(O){let $=0;for(let Q=0;Q<O.length;Q++)$+=O.charCodeAt(Q)==o?8-$%8:1;return $}const PO=new Y({start:rO,reduce(O,$){return O.depth<0&&J.has($)?O.parent:O},shift(O,$,Q,P){return $==w?new p(O,QO(P.read(P.pos,Q.pos))):$==S?O.parent:$==V||$==c||$==j?new p(O,-1):O},hash(O){return O.hash}}),eO=new W(O=>{for(let $=0;$<5;$++){if(O.next!="print".charCodeAt($))return;O.advance()}if(!/\w/.test(String.fromCharCode(O.next)))for(let $=0;;$++){let Q=O.peek($);if(!(Q==t||Q==o)){Q!=L&&Q!=B&&Q!=s&&Q!=i&&Q!=q&&O.acceptToken(l);return}}}),sO=n({'async "*" "**" FormatConversion FormatSpec':r.modifier,"for while if elif else try except finally return raise break continue with pass assert await yield match case":r.controlKeyword,"in not and or is del":r.operatorKeyword,"from def class global nonlocal lambda":r.definitionKeyword,import:r.moduleKeyword,"with as print":r.keyword,Boolean:r.bool,None:r.null,VariableName:r.variableName,"CallExpression/VariableName":r.function(r.variableName),"FunctionDefinition/VariableName":r.function(r.definition(r.variableName)),"ClassDefinition/VariableName":r.definition(r.className),PropertyName:r.propertyName,"CallExpression/MemberExpression/PropertyName":r.function(r.propertyName),Comment:r.lineComment,Number:r.number,String:r.string,FormatString:r.special(r.string),UpdateOp:r.updateOperator,ArithOp:r.arithmeticOperator,BitOp:r.bitwiseOperator,CompareOp:r.compareOperator,AssignOp:r.definitionOperator,Ellipsis:r.punctuation,At:r.meta,"( )":r.paren,"[ ]":r.squareBracket,"{ }":r.brace,".":r.derefOperator,", 
;":r.separator}),iO={__proto__:null,await:40,or:50,and:52,in:56,not:58,is:60,if:66,else:68,lambda:72,yield:90,from:92,async:98,for:100,None:152,True:154,False:154,del:168,pass:172,break:176,continue:180,return:184,raise:192,import:196,as:198,global:202,nonlocal:204,assert:208,elif:218,while:222,try:228,except:230,finally:232,with:236,def:240,class:250,match:261,case:267},oO=d.deserialize({version:14,states:"!L`O`Q$IXOOO%fQ$I[O'#G|OOQ$IS'#Cm'#CmOOQ$IS'#Cn'#CnO'UQ$IWO'#ClO(wQ$I[O'#G{OOQ$IS'#G|'#G|OOQ$IS'#DS'#DSOOQ$IS'#G{'#G{O)eQ$IWO'#CsO)uQ$IWO'#DdO*VQ$IWO'#DhOOQ$IS'#Ds'#DsO*jO`O'#DsO*rOpO'#DsO*zO!bO'#DtO+VO#tO'#DtO+bO&jO'#DtO+mO,UO'#DtO-oQ$I[O'#GmOOQ$IS'#Gm'#GmO'UQ$IWO'#GlO/RQ$I[O'#GlOOQ$IS'#E]'#E]O/jQ$IWO'#E^OOQ$IS'#Gk'#GkO/tQ$IWO'#GjOOQ$IV'#Gj'#GjO0PQ$IWO'#FPOOQ$IS'#GX'#GXO0UQ$IWO'#FOOOQ$IV'#Hx'#HxOOQ$IV'#Gi'#GiOOQ$IT'#Fh'#FhQ`Q$IXOOO'UQ$IWO'#CoO0dQ$IWO'#C{O0kQ$IWO'#DPO0yQ$IWO'#HQO1ZQ$I[O'#EQO'UQ$IWO'#EROOQ$IS'#ET'#ETOOQ$IS'#EV'#EVOOQ$IS'#EX'#EXO1oQ$IWO'#EZO2VQ$IWO'#E_O0PQ$IWO'#EaO2jQ$I[O'#EaO0PQ$IWO'#EdO/jQ$IWO'#EgO/jQ$IWO'#EkO/jQ$IWO'#EnO2uQ$IWO'#EpO2|Q$IWO'#EuO3XQ$IWO'#EqO/jQ$IWO'#EuO0PQ$IWO'#EwO0PQ$IWO'#E|O3^Q$IWO'#FROOQ$IS'#Cc'#CcOOQ$IS'#Cd'#CdOOQ$IS'#Ce'#CeOOQ$IS'#Cf'#CfOOQ$IS'#Cg'#CgOOQ$IS'#Ch'#ChOOQ$IS'#Cj'#CjO'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O'UQ$IWO,58|O3eQ$IWO'#DmOOQ$IS,5:W,5:WO3xQ$IWO'#H[OOQ$IS,5:Z,5:ZO4VQ%1`O,5:ZO4[Q$I[O,59WO0dQ$IWO,59`O0dQ$IWO,59`O0dQ$IWO,59`O6zQ$IWO,59`O7PQ$IWO,59`O7WQ$IWO,59hO7_Q$IWO'#G{O8eQ$IWO'#GzOOQ$IS'#Gz'#GzOOQ$IS'#DY'#DYO8|Q$IWO,59_O'UQ$IWO,59_O9[Q$IWO,59_O9aQ$IWO,5:PO'UQ$IWO,5:POOQ$IS,5:O,5:OO9oQ$IWO,5:OO9tQ$IWO,5:VO'UQ$IWO,5:VO'UQ$IWO,5:TOOQ$IS,5:S,5:SO:VQ$IWO,5:SO:[Q$IWO,5:UOOOO'#Fp'#FpO:aO`O,5:_OOQ$IS,5:_,5:_OOOO'#Fq'#FqO:iOpO,5:_O:qQ$IWO'#DuOOOO'#Fr'#FrO;RO!bO,5:`OOQ$IS,5:`,5:`OOOO'#Fu'#FuO;^O#tO,5:`OOOO'#Fv'#FvO;iO&jO,5:`OOOO'#Fw'#FwO;tO,UO,5:`OOQ$IS'#Fx'#FxO<PQ$I[O,5:dO>qQ$I[O,5=WO?[Q%GlO,5=WO?{Q$I[O,5=WOOQ$IS,5:x,5:xO@dQ$IXO'#GQOAsQ$IWO,5;TOOQ$IV,5=U,5=UOBOQ$I[O'#HtOBgQ$IWO,5;kOOQ$IS-E:V-E:VOOQ$IV,5;j,5;jO3SQ$IWO'#EwOOQ$IT-E9f-E9fOBoQ$I[O,59ZODvQ$I[O,59gOEaQ$IWO'#G}OElQ$IWO'#G}O0PQ$IWO'#G}OEwQ$IWO'#DROFPQ$IWO,59kOFUQ$IWO'#HRO'UQ$IWO'#HRO/jQ$IWO,5=lOOQ$IS,5=l,5=lO/jQ$IWO'#D|OOQ$IS'#D}'#D}OFsQ$IWO'#FzOGTQ$IWO,58zOGTQ$IWO,58zO)hQ$IWO,5:jOGcQ$I[O'#HTOOQ$IS,5:m,5:mOOQ$IS,5:u,5:uOGvQ$IWO,5:yOHXQ$IWO,5:{OOQ$IS'#F}'#F}OHgQ$I[O,5:{OHuQ$IWO,5:{OHzQ$IWO'#HwOOQ$IS,5;O,5;OOIYQ$IWO'#HsOOQ$IS,5;R,5;RO3XQ$IWO,5;VO3XQ$IWO,5;YOIkQ$I[O'#HyO'UQ$IWO'#HyOIuQ$IWO,5;[O2uQ$IWO,5;[O/jQ$IWO,5;aO0PQ$IWO,5;cOIzQ$IXO'#ElOKTQ$IZO,5;]ONiQ$IWO'#HzO3XQ$IWO,5;aONtQ$IWO,5;cONyQ$IWO,5;hO! 
RQ$I[O,5;mO'UQ$IWO,5;mO!#uQ$I[O1G.hO!#|Q$I[O1G.hO!&mQ$I[O1G.hO!&wQ$I[O1G.hO!)bQ$I[O1G.hO!)uQ$I[O1G.hO!*YQ$IWO'#HZO!*hQ$I[O'#GmO/jQ$IWO'#HZO!*rQ$IWO'#HYOOQ$IS,5:X,5:XO!*zQ$IWO,5:XO!+PQ$IWO'#H]O!+[Q$IWO'#H]O!+oQ$IWO,5=vOOQ$IS'#Dq'#DqOOQ$IS1G/u1G/uOOQ$IS1G.z1G.zO!,oQ$I[O1G.zO!,vQ$I[O1G.zO0dQ$IWO1G.zO!-cQ$IWO1G/SOOQ$IS'#DX'#DXO/jQ$IWO,59rOOQ$IS1G.y1G.yO!-jQ$IWO1G/cO!-zQ$IWO1G/cO!.SQ$IWO1G/dO'UQ$IWO'#HSO!.XQ$IWO'#HSO!.^Q$I[O1G.yO!.nQ$IWO,59gO!/tQ$IWO,5=rO!0UQ$IWO,5=rO!0^Q$IWO1G/kO!0cQ$I[O1G/kOOQ$IS1G/j1G/jO!0sQ$IWO,5=mO!1jQ$IWO,5=mO/jQ$IWO1G/oO!2XQ$IWO1G/qO!2^Q$I[O1G/qO!2nQ$I[O1G/oOOQ$IS1G/n1G/nOOQ$IS1G/p1G/pOOOO-E9n-E9nOOQ$IS1G/y1G/yOOOO-E9o-E9oO!3OQ$IWO'#HhO/jQ$IWO'#HhO!3^Q$IWO,5:aOOOO-E9p-E9pOOQ$IS1G/z1G/zOOOO-E9s-E9sOOOO-E9t-E9tOOOO-E9u-E9uOOQ$IS-E9v-E9vO!3iQ%GlO1G2rO!4YQ$I[O1G2rO'UQ$IWO,5<eOOQ$IS,5<e,5<eOOQ$IS-E9w-E9wOOQ$IS,5<l,5<lOOQ$IS-E:O-E:OOOQ$IV1G0o1G0oO0PQ$IWO'#F|O!4qQ$I[O,5>`OOQ$IS1G1V1G1VO!5YQ$IWO1G1VOOQ$IS'#DT'#DTO/jQ$IWO,5=iOOQ$IS,5=i,5=iO!5_Q$IWO'#FiO!5jQ$IWO,59mO!5rQ$IWO1G/VO!5|Q$I[O,5=mOOQ$IS1G3W1G3WOOQ$IS,5:h,5:hO!6mQ$IWO'#GlOOQ$IS,5<f,5<fOOQ$IS-E9x-E9xO!7OQ$IWO1G.fOOQ$IS1G0U1G0UO!7^Q$IWO,5=oO!7nQ$IWO,5=oO/jQ$IWO1G0eO/jQ$IWO1G0eO0PQ$IWO1G0gOOQ$IS-E9{-E9{O!8PQ$IWO1G0gO!8[Q$IWO1G0gO!8aQ$IWO,5>cO!8oQ$IWO,5>cO!8}Q$IWO,5>_O!9eQ$IWO,5>_O!9vQ$IZO1G0qO!=XQ$IZO1G0tO!@gQ$IWO,5>eO!@qQ$IWO,5>eO!@yQ$I[O,5>eO/jQ$IWO1G0vO!ATQ$IWO1G0vO3XQ$IWO1G0{ONtQ$IWO1G0}OOQ$IV,5;W,5;WO!AYQ$IYO,5;WO!A_Q$IZO1G0wO!DsQ$IWO'#GUO3XQ$IWO1G0wO3XQ$IWO1G0wO!EQQ$IWO,5>fO!E_Q$IWO,5>fO0PQ$IWO,5>fOOQ$IV1G0{1G0{O!EgQ$IWO'#EyO!ExQ%1`O1G0}OOQ$IV1G1S1G1SO3XQ$IWO1G1SO!FQQ$IWO'#FTOOQ$IV1G1X1G1XO! RQ$I[O1G1XOOQ$IS,5=u,5=uOOQ$IS'#Dn'#DnO/jQ$IWO,5=uO!FVQ$IWO,5=tO!FjQ$IWO,5=tOOQ$IS1G/s1G/sO!FrQ$IWO,5=wO!GSQ$IWO,5=wO!G[Q$IWO,5=wO!GoQ$IWO,5=wO!HPQ$IWO,5=wOOQ$IS1G3b1G3bOOQ$IS7+$f7+$fO!5rQ$IWO7+$nO!IrQ$IWO1G.zO!IyQ$IWO1G.zOOQ$IS1G/^1G/^OOQ$IS,5<V,5<VO'UQ$IWO,5<VOOQ$IS7+$}7+$}O!JQQ$IWO7+$}OOQ$IS-E9i-E9iOOQ$IS7+%O7+%OO!JbQ$IWO,5=nO'UQ$IWO,5=nOOQ$IS7+$e7+$eO!JgQ$IWO7+$}O!JoQ$IWO7+%OO!JtQ$IWO1G3^OOQ$IS7+%V7+%VO!KUQ$IWO1G3^O!K^Q$IWO7+%VOOQ$IS,5<U,5<UO'UQ$IWO,5<UO!KcQ$IWO1G3XOOQ$IS-E9h-E9hO!LYQ$IWO7+%ZOOQ$IS7+%]7+%]O!LhQ$IWO1G3XO!MVQ$IWO7+%]O!M[Q$IWO1G3_O!MlQ$IWO1G3_O!MtQ$IWO7+%ZO!MyQ$IWO,5>SO!NaQ$IWO,5>SO!NaQ$IWO,5>SO!NoO!LQO'#DwO!NzOSO'#HiOOOO1G/{1G/{O# PQ$IWO1G/{O# XQ%GlO7+(^O# 
xQ$I[O1G2PP#!cQ$IWO'#FyOOQ$IS,5<h,5<hOOQ$IS-E9z-E9zOOQ$IS7+&q7+&qOOQ$IS1G3T1G3TOOQ$IS,5<T,5<TOOQ$IS-E9g-E9gOOQ$IS7+$q7+$qO#!pQ$IWO,5=WO##ZQ$IWO,5=WO##lQ$I[O,5<WO#$PQ$IWO1G3ZOOQ$IS-E9j-E9jOOQ$IS7+&P7+&PO#$aQ$IWO7+&POOQ$IS7+&R7+&RO#$oQ$IWO'#HvO0PQ$IWO'#HuO#%TQ$IWO7+&ROOQ$IS,5<k,5<kO#%`Q$IWO1G3}OOQ$IS-E9}-E9}OOQ$IS,5<g,5<gO#%nQ$IWO1G3yOOQ$IS-E9y-E9yO#&UQ$IZO7+&]O!DsQ$IWO'#GSO3XQ$IWO7+&]O3XQ$IWO7+&`O#)gQ$I[O,5<oO'UQ$IWO,5<oO#)qQ$IWO1G4POOQ$IS-E:R-E:RO#){Q$IWO1G4PO3XQ$IWO7+&bO/jQ$IWO7+&bOOQ$IV7+&g7+&gO!ExQ%1`O7+&iO#*TQ$IXO1G0rOOQ$IV-E:S-E:SO3XQ$IWO7+&cO3XQ$IWO7+&cOOQ$IV,5<p,5<pO#+yQ$IWO,5<pOOQ$IV7+&c7+&cO#,UQ$IZO7+&cO#/dQ$IWO,5<qO#/oQ$IWO1G4QOOQ$IS-E:T-E:TO#/|Q$IWO1G4QO#0UQ$IWO'#H|O#0dQ$IWO'#H|O0PQ$IWO'#H|OOQ$IS'#H|'#H|O#0oQ$IWO'#H{OOQ$IS,5;e,5;eO#0wQ$IWO,5;eO/jQ$IWO'#E{OOQ$IV7+&i7+&iO3XQ$IWO7+&iOOQ$IV7+&n7+&nO#0|Q$IYO,5;oOOQ$IV7+&s7+&sOOQ$IS1G3a1G3aOOQ$IS,5<Y,5<YO#1RQ$IWO1G3`OOQ$IS-E9l-E9lO#1fQ$IWO,5<ZO#1qQ$IWO,5<ZO#2UQ$IWO1G3cOOQ$IS-E9m-E9mO#2fQ$IWO1G3cO#2nQ$IWO1G3cO#3OQ$IWO1G3cO#2fQ$IWO1G3cOOQ$IS<<HY<<HYO#3ZQ$I[O1G1qOOQ$IS<<Hi<<HiP#3hQ$IWO'#FkO7WQ$IWO1G3YO#3uQ$IWO1G3YO#3zQ$IWO<<HiOOQ$IS<<Hj<<HjO#4[Q$IWO7+(xOOQ$IS<<Hq<<HqO#4lQ$I[O1G1pP#5]Q$IWO'#FjO#5jQ$IWO7+(yO#5zQ$IWO7+(yO#6SQ$IWO<<HuO#6XQ$IWO7+(sOOQ$IS<<Hw<<HwO#7OQ$IWO,5<XO'UQ$IWO,5<XOOQ$IS-E9k-E9kOOQ$IS<<Hu<<HuOOQ$IS,5<_,5<_O/jQ$IWO,5<_O#7TQ$IWO1G3nOOQ$IS-E9q-E9qO#7kQ$IWO1G3nOOOO'#Ft'#FtO#7yO!LQO,5:cOOOO,5>T,5>TOOOO7+%g7+%gO#8UQ$IWO1G2rO#8oQ$IWO1G2rP'UQ$IWO'#FlO/jQ$IWO<<IkO#9QQ$IWO,5>bO#9cQ$IWO,5>bO0PQ$IWO,5>bO#9tQ$IWO,5>aOOQ$IS<<Im<<ImP0PQ$IWO'#GPP/jQ$IWO'#F{OOQ$IV-E:Q-E:QO3XQ$IWO<<IwOOQ$IV,5<n,5<nO3XQ$IWO,5<nOOQ$IV<<Iw<<IwOOQ$IV<<Iz<<IzO#9yQ$I[O1G2ZP#:TQ$IWO'#GTO#:[Q$IWO7+)kO#:fQ$IZO<<I|O3XQ$IWO<<I|OOQ$IV<<JT<<JTO3XQ$IWO<<JTOOQ$IV'#GR'#GRO#=tQ$IZO7+&^OOQ$IV<<I}<<I}O#?pQ$IZO<<I}OOQ$IV1G2[1G2[O0PQ$IWO1G2[O3XQ$IWO<<I}O0PQ$IWO1G2]P/jQ$IWO'#GVO#COQ$IWO7+)lO#C]Q$IWO7+)lOOQ$IS'#Ez'#EzO/jQ$IWO,5>hO#CeQ$IWO,5>hOOQ$IS,5>h,5>hO#CpQ$IWO,5>gO#DRQ$IWO,5>gOOQ$IS1G1P1G1POOQ$IS,5;g,5;gO#DZQ$IWO1G1ZP#D`Q$IWO'#FnO#DpQ$IWO1G1uO#ETQ$IWO1G1uO#EeQ$IWO1G1uP#EpQ$IWO'#FoO#E}Q$IWO7+(}O#F_Q$IWO7+(}O#F_Q$IWO7+(}O#FgQ$IWO7+(}O#FwQ$IWO7+(tO7WQ$IWO7+(tOOQ$ISAN>TAN>TO#GbQ$IWO<<LeOOQ$ISAN>aAN>aO/jQ$IWO1G1sO#GrQ$I[O1G1sP#G|Q$IWO'#FmOOQ$IS1G1y1G1yP#HZQ$IWO'#FsO#HhQ$IWO7+)YOOOO-E9r-E9rO#IOQ$IWO7+(^OOQ$ISAN?VAN?VO#IiQ$IWO,5<jO#I}Q$IWO1G3|OOQ$IS-E9|-E9|O#J`Q$IWO1G3|OOQ$IS1G3{1G3{OOQ$IVAN?cAN?cOOQ$IV1G2Y1G2YO3XQ$IWOAN?hO#JqQ$IZOAN?hOOQ$IVAN?oAN?oOOQ$IV-E:P-E:POOQ$IV<<Ix<<IxO3XQ$IWOAN?iO3XQ$IWO7+'vOOQ$IVAN?iAN?iOOQ$IS7+'w7+'wO#NPQ$IWO<<MWOOQ$IS1G4S1G4SO/jQ$IWO1G4SOOQ$IS,5<r,5<rO#N^Q$IWO1G4ROOQ$IS-E:U-E:UOOQ$IU'#GY'#GYO#NoQ$IYO7+&uO#NzQ$IWO'#FUO$ 
rQ$IWO7+'aO$!SQ$IWO7+'aOOQ$IS7+'a7+'aO$!_Q$IWO<<LiO$!oQ$IWO<<LiO$!oQ$IWO<<LiO$!wQ$IWO'#HUOOQ$IS<<L`<<L`O$#RQ$IWO<<L`OOQ$IS7+'_7+'_O0PQ$IWO1G2UP0PQ$IWO'#GOO$#lQ$IWO7+)hO$#}Q$IWO7+)hOOQ$IVG25SG25SO3XQ$IWOG25SOOQ$IVG25TG25TOOQ$IV<<Kb<<KbOOQ$IS7+)n7+)nP$$`Q$IWO'#GWOOQ$IU-E:W-E:WOOQ$IV<<Ja<<JaO$%SQ$I[O'#FWOOQ$IS'#FY'#FYO$%dQ$IWO'#FXO$&UQ$IWO'#FXOOQ$IS'#FX'#FXO$&ZQ$IWO'#IOO#NzQ$IWO'#F`O#NzQ$IWO'#F`O$&rQ$IWO'#FaO#NzQ$IWO'#FbO$&yQ$IWO'#IPOOQ$IS'#IP'#IPO$'hQ$IWO,5;pOOQ$IS<<J{<<J{O$'pQ$IWO<<J{O$(QQ$IWOANBTO$(bQ$IWOANBTO$(jQ$IWO'#HVOOQ$IS'#HV'#HVO0kQ$IWO'#DaO$)TQ$IWO,5=pOOQ$ISANAzANAzOOQ$IS7+'p7+'pO$)lQ$IWO<<MSOOQ$IVLD*nLD*nO4VQ%1`O'#G[O$)}Q$I[O,5;yO#NzQ$IWO'#FdOOQ$IS,5;},5;}OOQ$IS'#FZ'#FZO$*oQ$IWO,5;sO$*tQ$IWO,5;sOOQ$IS'#F^'#F^O#NzQ$IWO'#GZO$+fQ$IWO,5;wO$,QQ$IWO,5>jO$,bQ$IWO,5>jO0PQ$IWO,5;vO$,sQ$IWO,5;zO$,xQ$IWO,5;zO#NzQ$IWO'#IQO$,}Q$IWO'#IQO$-SQ$IWO,5;{OOQ$IS,5;|,5;|O'UQ$IWO'#FgOOQ$IU1G1[1G1[O3XQ$IWO1G1[OOQ$ISAN@gAN@gO$-XQ$IWOG27oO$-iQ$IWO,59{OOQ$IS1G3[1G3[OOQ$IS,5<v,5<vOOQ$IS-E:Y-E:YO$-nQ$I[O'#FWO$-uQ$IWO'#IRO$.TQ$IWO'#IRO$.]Q$IWO,5<OOOQ$IS1G1_1G1_O$.bQ$IWO1G1_O$.gQ$IWO,5<uOOQ$IS-E:X-E:XO$/RQ$IWO,5<yO$/jQ$IWO1G4UOOQ$IS-E:]-E:]OOQ$IS1G1b1G1bOOQ$IS1G1f1G1fO$/zQ$IWO,5>lO#NzQ$IWO,5>lOOQ$IS1G1g1G1gO$0YQ$I[O,5<ROOQ$IU7+&v7+&vO$!wQ$IWO1G/gO#NzQ$IWO,5<PO$0aQ$IWO,5>mO$0hQ$IWO,5>mOOQ$IS1G1j1G1jOOQ$IS7+&y7+&yP#NzQ$IWO'#G_O$0pQ$IWO1G4WO$0zQ$IWO1G4WO$1SQ$IWO1G4WOOQ$IS7+%R7+%RO$1bQ$IWO1G1kO$1pQ$I[O'#FWO$1wQ$IWO,5<xOOQ$IS,5<x,5<xO$2VQ$IWO1G4XOOQ$IS-E:[-E:[O#NzQ$IWO,5<wO$2^Q$IWO,5<wO$2cQ$IWO7+)rOOQ$IS-E:Z-E:ZO$2mQ$IWO7+)rO#NzQ$IWO,5<QP#NzQ$IWO'#G^O$2uQ$IWO1G2cO#NzQ$IWO1G2cP$3TQ$IWO'#G]O$3[Q$IWO<<M^O$3fQ$IWO1G1lO$3tQ$IWO7+'}O7WQ$IWO'#C{O7WQ$IWO,59`O7WQ$IWO,59`O7WQ$IWO,59`O$4SQ$I[O,5=WO7WQ$IWO1G.zO/jQ$IWO1G/VO/jQ$IWO7+$nP$4gQ$IWO'#FyO'UQ$IWO'#GlO$4tQ$IWO,59`O$4yQ$IWO,59`O$5QQ$IWO,59kO$5VQ$IWO1G/SO0kQ$IWO'#DPO7WQ$IWO,59h",stateData:"$5m~O%[OS%XOS%WOSQOS~OPhOTeOdsOfXOmtOq!SOtuO}vO!O!PO!R!VO!S!UO!VYO!ZZO!fdO!mdO!ndO!odO!vxO!xyO!zzO!|{O#O|O#S}O#U!OO#X!QO#Y!QO#[!RO#c!TO#f!WO#j!XO#l!YO#q!ZO#tlO#v![O%VqO%gQO%hQO%lRO%mVO&R[O&S]O&V^O&Y_O&``O&caO&ebO~OT!bO]!bO_!cOf!jO!V!lO!d!nO%b!]O%c!^O%d!_O%e!`O%f!`O%g!aO%h!aO%i!bO%j!bO%k!bO~Oi%pXj%pXk%pXl%pXm%pXn%pXq%pXx%pXy%pX!s%pX#^%pX%V%pX%Y%pX%r%pXe%pX!R%pX!S%pX%s%pX!U%pX!Y%pX!O%pX#V%pXr%pX!j%pX~P$bOdsOfXO!VYO!ZZO!fdO!mdO!ndO!odO%gQO%hQO%lRO%mVO&R[O&S]O&V^O&Y_O&``O&caO&ebO~Ox%oXy%oX#^%oX%V%oX%Y%oX%r%oX~Oi!qOj!rOk!pOl!pOm!sOn!tOq!uO!s%oX~P(cOT!{Om/iOt/wO}vO~P'UOT#OOm/iOt/wO!U#PO~P'UOT#SO_#TOm/iOt/wO!Y#UO~P'UO&T#XO&U#ZO~O&W#[O&X#ZO~O!Z#^O&Z#_O&_#aO~O!Z#^O&a#bO&b#aO~O!Z#^O&U#aO&d#dO~O!Z#^O&X#aO&f#fO~OT%aX]%aX_%aXf%aXi%aXj%aXk%aXl%aXm%aXn%aXq%aXx%aX!V%aX!d%aX%b%aX%c%aX%d%aX%e%aX%f%aX%g%aX%h%aX%i%aX%j%aX%k%aXe%aX!R%aX!S%aX~O&R[O&S]O&V^O&Y_O&``O&caO&ebOy%aX!s%aX#^%aX%V%aX%Y%aX%r%aX%s%aX!U%aX!Y%aX!O%aX#V%aXr%aX!j%aX~P+xOx#kOy%`X!s%`X#^%`X%V%`X%Y%`X%r%`X~Om/iOt/wO~P'UO#^#nO%V#pO%Y#pO~O%mVO~O!R#uO#l!YO#q!ZO#tlO~OmtO~P'UOT#zO_#{O%mVOyuP~OT$POm/iOt/wO!O$QO~P'UOy$SO!s$XO%r$TO#^!tX%V!tX%Y!tX~OT$POm/iOt/wO#^!}X%V!}X%Y!}X~P'UOm/iOt/wO#^#RX%V#RX%Y#RX~P'UO!d$_O!m$_O%mVO~OT$iO~P'UO!S$kO#j$lO#l$mO~Oy$nO~OT$uO~P'UOT%OO_%OOe%QOm/iOt/wO~P'UOm/iOt/wOy%TO~P'UO&Q%VO~O_!cOf!jO!V!lO!d!nOT`a]`ai`aj`ak`al`am`an`aq`ax`ay`a!s`a#^`a%V`a%Y`a%b`a%c`a%d`a%e`a%f`a%g`a%h`a%i`a%j`a%k`a%r`ae`a!R`a!S`a%s`a!U`a!Y`a!O`a#V`ar`a!j`a~Ol%[O~Om%[O~P'UOm/iO~P'UOi/kOj/lOk/jOl/jOm/sOn/tOq/xOe%oX!R%oX!S%oX%s%oX!U%oX!Y%oX!O%oX#V%oX!j%oX~P(cO%s%^Oe%nXx%nX!R%nX!S%nX!U%nXy%nX~Oe%`Ox%aO!R%eO!S%dO~Oe%`O~Ox%hO!R%eO!S%dO!U%zX~O!U%lO~Ox%mOy%oO!R%eO!S%dO!Y%uX~O!Y%sO~O!Y%tO~O&T#XO&U%vO~O&W#[O&X%vO~OT%yOm/iOt/wO}vO~P'UO!Z#^O&Z#_O&_%|O~O!Z#^O&a#bO&
b%|O~O!Z#^O&U%|O&d#dO~O!Z#^O&X%|O&f#fO~OT!la]!la_!laf!lai!laj!lak!lal!lam!lan!laq!lax!lay!la!V!la!d!la!s!la#^!la%V!la%Y!la%b!la%c!la%d!la%e!la%f!la%g!la%h!la%i!la%j!la%k!la%r!lae!la!R!la!S!la%s!la!U!la!Y!la!O!la#V!lar!la!j!la~P#yOx&ROy%`a!s%`a#^%`a%V%`a%Y%`a%r%`a~P$bOT&TOmtOtuOy%`a!s%`a#^%`a%V%`a%Y%`a%r%`a~P'UOx&ROy%`a!s%`a#^%`a%V%`a%Y%`a%r%`a~OPhOTeOmtOtuO}vO!O!PO!vxO!xyO!zzO!|{O#O|O#S}O#U!OO#X!QO#Y!QO#[!RO#^$tX%V$tX%Y$tX~P'UO#^#nO%V&YO%Y&YO~O!d&ZOf&hX%V&hX#V&hX#^&hX%Y&hX#U&hX~Of!jO%V&]O~Oicajcakcalcamcancaqcaxcayca!sca#^ca%Vca%Yca%rcaeca!Rca!Sca%sca!Uca!Yca!Oca#Vcarca!jca~P$bOqoaxoayoa#^oa%Voa%Yoa%roa~Oi!qOj!rOk!pOl!pOm!sOn!tO!soa~PD_O%r&_Ox%qXy%qX~O%mVOx%qXy%qX~Ox&bOyuX~Oy&dO~Ox%mO#^%uX%V%uX%Y%uXe%uXy%uX!Y%uX!j%uX%r%uX~OT/rOm/iOt/wO}vO~P'UO%r$TO#^Sa%VSa%YSa~Ox&mO#^%wX%V%wX%Y%wXl%wX~P$bOx&pO!O&oO#^#Ra%V#Ra%Y#Ra~O#V&qO#^#Ta%V#Ta%Y#Ta~O!d$_O!m$_O#U&sO%mVO~O#U&sO~Ox&uO#^&kX%V&kX%Y&kX~Ox&wO#^&gX%V&gX%Y&gXy&gX~Ox&{Ol&mX~P$bOl'OO~OPhOTeOmtOtuO}vO!O!PO!vxO!xyO!zzO!|{O#O|O#S}O#U!OO#X!QO#Y!QO#[!RO%V'TO~P'UOr'XO#g'VO#h'WOP#eaT#ead#eaf#eam#eaq#eat#ea}#ea!O#ea!R#ea!S#ea!V#ea!Z#ea!f#ea!m#ea!n#ea!o#ea!v#ea!x#ea!z#ea!|#ea#O#ea#S#ea#U#ea#X#ea#Y#ea#[#ea#c#ea#f#ea#j#ea#l#ea#q#ea#t#ea#v#ea%S#ea%V#ea%g#ea%h#ea%l#ea%m#ea&R#ea&S#ea&V#ea&Y#ea&`#ea&c#ea&e#ea%U#ea%Y#ea~Ox'YO#V'[Oy&nX~Of'^O~Of!jOy$nO~Oy'bO~P$bOT!bO]!bO_!cOf!jO!V!lO!d!nO%d!_O%e!`O%f!`O%g!aO%h!aO%i!bO%j!bO%k!bOiUijUikUilUimUinUiqUixUiyUi!sUi#^Ui%VUi%YUi%bUi%rUieUi!RUi!SUi%sUi!UUi!YUi!OUi#VUirUi!jUi~O%c!^O~P! YO%cUi~P! YOT!bO]!bO_!cOf!jO!V!lO!d!nO%g!aO%h!aO%i!bO%j!bO%k!bOiUijUikUilUimUinUiqUixUiyUi!sUi#^Ui%VUi%YUi%bUi%cUi%dUi%rUieUi!RUi!SUi%sUi!UUi!YUi!OUi#VUirUi!jUi~O%e!`O%f!`O~P!$TO%eUi%fUi~P!$TO_!cOf!jO!V!lO!d!nOiUijUikUilUimUinUiqUixUiyUi!sUi#^Ui%VUi%YUi%bUi%cUi%dUi%eUi%fUi%gUi%hUi%rUieUi!RUi!SUi%sUi!UUi!YUi!OUi#VUirUi!jUi~OT!bO]!bO%i!bO%j!bO%k!bO~P!'ROTUi]Ui%iUi%jUi%kUi~P!'RO!R%eO!S%dOe%}Xx%}X~O%r'fO%s'fO~P+xOx'hOe%|X~Oe'jO~Ox'kOy'mO!U&PX~Om/iOt/wOx'kOy'nO!U&PX~P'UO!U'pO~Ok!pOl!pOm!sOn!tOihiqhixhiyhi!shi#^hi%Vhi%Yhi%rhi~Oj!rO~P!+tOjhi~P!+tOi/kOj/lOk/jOl/jOm/sOn/tO~Or'rO~P!,}OT'wOe'xOm/iOt/wO~P'UOe'xOx'yO~Oe'{O~O!S'}O~Oe(OOx'yO!R%eO!S%dO~P$bOi/kOj/lOk/jOl/jOm/sOn/tOeoa!Roa!Soa%soa!Uoa!Yoa!Ooa#Voaroa!joa~PD_OT'wOm/iOt/wO!U%za~P'UOx(RO!U%za~O!U(SO~Ox(RO!R%eO!S%dO!U%za~P$bOT(WOm/iOt/wO!Y%ua#^%ua%V%ua%Y%uae%uay%ua!j%ua%r%ua~P'UOx(XO!Y%ua#^%ua%V%ua%Y%uae%uay%ua!j%ua%r%ua~O!Y([O~Ox(XO!R%eO!S%dO!Y%ua~P$bOx(_O!R%eO!S%dO!Y%{a~P$bOx(bOy&[X!Y&[X!j&[X~Oy(eO!Y(gO!j(hO~OT&TOmtOtuOy%`i!s%`i#^%`i%V%`i%Y%`i%r%`i~P'UOx(iOy%`i!s%`i#^%`i%V%`i%Y%`i%r%`i~O!d&ZOf&ha%V&ha#V&ha#^&ha%Y&ha#U&ha~O%V(nO~OT#zO_#{O%mVO~Ox&bOyua~OmtOtuO~P'UOx(XO#^%ua%V%ua%Y%uae%uay%ua!Y%ua!j%ua%r%ua~P$bOx(sO#^%`X%V%`X%Y%`X%r%`X~O%r$TO#^Si%VSi%YSi~O#^%wa%V%wa%Y%wal%wa~P'UOx(vO#^%wa%V%wa%Y%wal%wa~OT(zOf(|O%mVO~O#U(}O~O%mVO#^&ka%V&ka%Y&ka~Ox)PO#^&ka%V&ka%Y&ka~Om/iOt/wO#^&ga%V&ga%Y&gay&ga~P'UOx)SO#^&ga%V&ga%Y&gay&ga~Or)WO#a)VOP#_iT#_id#_if#_im#_iq#_it#_i}#_i!O#_i!R#_i!S#_i!V#_i!Z#_i!f#_i!m#_i!n#_i!o#_i!v#_i!x#_i!z#_i!|#_i#O#_i#S#_i#U#_i#X#_i#Y#_i#[#_i#c#_i#f#_i#j#_i#l#_i#q#_i#t#_i#v#_i%S#_i%V#_i%g#_i%h#_i%l#_i%m#_i&R#_i&S#_i&V#_i&Y#_i&`#_i&c#_i&e#_i%U#_i%Y#_i~Or)XOP#biT#bid#bif#bim#biq#bit#bi}#bi!O#bi!R#bi!S#bi!V#bi!Z#bi!f#bi!m#bi!n#bi!o#bi!v#bi!x#bi!z#bi!|#bi#O#bi#S#bi#U#bi#X#bi#Y#bi#[#bi#c#bi#f#bi#j#bi#l#bi#q#bi#t#bi#v#bi%S#bi%V#bi%g#bi%h#bi%l#bi%m#bi&R#bi&S#bi&V#bi&Y#bi&`#bi&c#bi&e#bi%U#bi%Y#bi~OT)ZOl&ma~P'UOx)[Ol&ma~Ox)[Ol&ma~P$bOl)`O~O%T)cO~Or)fO#g'VO#h)eOP#eiT#eid#eif#eim#eiq#eit#ei}#ei!O#ei!R#ei!S#ei!V#ei!Z#ei!f#ei!m#ei!n#ei!o#ei!v#ei!x#ei!z#ei!|#ei#O#ei#S#ei#U#ei#X#ei#Y#ei#[#ei#c#ei#f#ei#j#
ei#l#ei#q#ei#t#ei#v#ei%S#ei%V#ei%g#ei%h#ei%l#ei%m#ei&R#ei&S#ei&V#ei&Y#ei&`#ei&c#ei&e#ei%U#ei%Y#ei~Om/iOt/wOy$nO~P'UOm/iOt/wOy&na~P'UOx)lOy&na~OT)pO_)qOe)tO%i)rO%mVO~Oy$nO&q)vO~O%V)zO~OT%OO_%OOm/iOt/wOe%|a~P'UOx*OOe%|a~Om/iOt/wOy*RO!U&Pa~P'UOx*SO!U&Pa~Om/iOt/wOx*SOy*VO!U&Pa~P'UOm/iOt/wOx*SO!U&Pa~P'UOx*SOy*VO!U&Pa~Ok/jOl/jOm/sOn/tOehiihiqhixhi!Rhi!Shi%shi!Uhiyhi!Yhi#^hi%Vhi%Yhi!Ohi#Vhirhi!jhi%rhi~Oj/lO~P!H[Ojhi~P!H[OT'wOe*[Om/iOt/wO~P'UOl*^O~Oe*[Ox*`O~Oe*aO~OT'wOm/iOt/wO!U%zi~P'UOx*bO!U%zi~O!U*cO~OT(WOm/iOt/wO!Y%ui#^%ui%V%ui%Y%uie%uiy%ui!j%ui%r%ui~P'UOx*fO!R%eO!S%dO!Y%{i~Ox*iO!Y%ui#^%ui%V%ui%Y%uie%uiy%ui!j%ui%r%ui~O!Y*jO~O_*lOm/iOt/wO!Y%{i~P'UOx*fO!Y%{i~O!Y*nO~OT*pOm/iOt/wOy&[a!Y&[a!j&[a~P'UOx*qOy&[a!Y&[a!j&[a~O!Z#^O&^*tO!Y!kX~O!Y*vO~Oy(eO!Y*wO~OT&TOmtOtuOy%`q!s%`q#^%`q%V%`q%Y%`q%r%`q~P'UOx$miy$mi!s$mi#^$mi%V$mi%Y$mi%r$mi~P$bOT&TOmtOtuO~P'UOT&TOm/iOt/wO#^%`a%V%`a%Y%`a%r%`a~P'UOx*xO#^%`a%V%`a%Y%`a%r%`a~Ox$`a#^$`a%V$`a%Y$`al$`a~P$bO#^%wi%V%wi%Y%wil%wi~P'UOx*{O#^#Rq%V#Rq%Y#Rq~Ox*|O#V+OO#^&jX%V&jX%Y&jXe&jX~OT+QOf(|O%mVO~O%mVO#^&ki%V&ki%Y&ki~Om/iOt/wO#^&gi%V&gi%Y&giy&gi~P'UOr+UO#a)VOP#_qT#_qd#_qf#_qm#_qq#_qt#_q}#_q!O#_q!R#_q!S#_q!V#_q!Z#_q!f#_q!m#_q!n#_q!o#_q!v#_q!x#_q!z#_q!|#_q#O#_q#S#_q#U#_q#X#_q#Y#_q#[#_q#c#_q#f#_q#j#_q#l#_q#q#_q#t#_q#v#_q%S#_q%V#_q%g#_q%h#_q%l#_q%m#_q&R#_q&S#_q&V#_q&Y#_q&`#_q&c#_q&e#_q%U#_q%Y#_q~Ol$wax$wa~P$bOT)ZOl&mi~P'UOx+]Ol&mi~OPhOTeOmtOq!SOtuO}vO!O!PO!R!VO!S!UO!vxO!xyO!zzO!|{O#O|O#S}O#U!OO#X!QO#Y!QO#[!RO#c!TO#f!WO#j!XO#l!YO#q!ZO#tlO#v![O~P'UOx+gOy$nO#V+gO~O#h+hOP#eqT#eqd#eqf#eqm#eqq#eqt#eq}#eq!O#eq!R#eq!S#eq!V#eq!Z#eq!f#eq!m#eq!n#eq!o#eq!v#eq!x#eq!z#eq!|#eq#O#eq#S#eq#U#eq#X#eq#Y#eq#[#eq#c#eq#f#eq#j#eq#l#eq#q#eq#t#eq#v#eq%S#eq%V#eq%g#eq%h#eq%l#eq%m#eq&R#eq&S#eq&V#eq&Y#eq&`#eq&c#eq&e#eq%U#eq%Y#eq~O#V+iOx$yay$ya~Om/iOt/wOy&ni~P'UOx+kOy&ni~Oy$SO%r+mOe&pXx&pX~O%mVOe&pXx&pX~Ox+qOe&oX~Oe+sO~O%T+uO~OT%OO_%OOm/iOt/wOe%|i~P'UOy+wOx$ca!U$ca~Om/iOt/wOy+xOx$ca!U$ca~P'UOm/iOt/wOy*RO!U&Pi~P'UOx+{O!U&Pi~Om/iOt/wOx+{O!U&Pi~P'UOx+{Oy,OO!U&Pi~Oe$_ix$_i!U$_i~P$bOT'wOm/iOt/wO~P'UOl,QO~OT'wOe,ROm/iOt/wO~P'UOT'wOm/iOt/wO!U%zq~P'UOx$^i!Y$^i#^$^i%V$^i%Y$^ie$^iy$^i!j$^i%r$^i~P$bOT(WOm/iOt/wO~P'UO_*lOm/iOt/wO!Y%{q~P'UOx,SO!Y%{q~O!Y,TO~OT(WOm/iOt/wO!Y%uq#^%uq%V%uq%Y%uqe%uqy%uq!j%uq%r%uq~P'UOy,UO~OT*pOm/iOt/wOy&[i!Y&[i!j&[i~P'UOx,ZOy&[i!Y&[i!j&[i~O!Z#^O&^*tO!Y!ka~OT&TOm/iOt/wO#^%`i%V%`i%Y%`i%r%`i~P'UOx,]O#^%`i%V%`i%Y%`i%r%`i~O%mVO#^&ja%V&ja%Y&jae&ja~Ox,`O#^&ja%V&ja%Y&jae&ja~Oe,cO~Ol$wix$wi~P$bOT)ZO~P'UOT)ZOl&mq~P'UOr,fOP#dyT#dyd#dyf#dym#dyq#dyt#dy}#dy!O#dy!R#dy!S#dy!V#dy!Z#dy!f#dy!m#dy!n#dy!o#dy!v#dy!x#dy!z#dy!|#dy#O#dy#S#dy#U#dy#X#dy#Y#dy#[#dy#c#dy#f#dy#j#dy#l#dy#q#dy#t#dy#v#dy%S#dy%V#dy%g#dy%h#dy%l#dy%m#dy&R#dy&S#dy&V#dy&Y#dy&`#dy&c#dy&e#dy%U#dy%Y#dy~OPhOTeOmtOq!SOtuO}vO!O!PO!R!VO!S!UO!vxO!xyO!zzO!|{O#O|O#S}O#U!OO#X!QO#Y!QO#[!RO#c!TO#f!WO#j!XO#l!YO#q!ZO#tlO#v![O%U,jO%Y,jO~P'UO#h,kOP#eyT#eyd#eyf#eym#eyq#eyt#ey}#ey!O#ey!R#ey!S#ey!V#ey!Z#ey!f#ey!m#ey!n#ey!o#ey!v#ey!x#ey!z#ey!|#ey#O#ey#S#ey#U#ey#X#ey#Y#ey#[#ey#c#ey#f#ey#j#ey#l#ey#q#ey#t#ey#v#ey%S#ey%V#ey%g#ey%h#ey%l#ey%m#ey&R#ey&S#ey&V#ey&Y#ey&`#ey&c#ey&e#ey%U#ey%Y#ey~Om/iOt/wOy&nq~P'UOx,oOy&nq~O%r+mOe&pax&pa~OT)pO_)qO%i)rO%mVOe&oa~Ox,sOe&oa~O#y,wO~OT%OO_%OOm/iOt/wO~P'UOm/iOt/wOy,xOx$ci!U$ci~P'UOm/iOt/wOx$ci!U$ci~P'UOy,xOx$ci!U$ci~Om/iOt/wOy*RO~P'UOm/iOt/wOy*RO!U&Pq~P'UOx,{O!U&Pq~Om/iOt/wOx,{O!U&Pq~P'UOq-OO!R%eO!S%dOe%vq!U%vq!Y%vqx%vq~P!,}O_*lOm/iOt/wO!Y%{y~P'UOx$ai!Y$ai~P$bO_*lOm/iOt/wO~P'UOT*pOm/iOt/wO~P'UOT*pOm/iOt/wOy&[q!Y&[q!j&[q~P'UOT&TOm/iOt/wO#^%`q%V%`q%Y%`q%r%`q~P'UO#V-SOx$ra#^$ra%V$ra%Y$rae$ra~O%mVO#^&ji%V&ji%Y&jie&ji~Ox-UO#^&ji%V&ji%Y&jie&ji
~Or-XOP#d!RT#d!Rd#d!Rf#d!Rm#d!Rq#d!Rt#d!R}#d!R!O#d!R!R#d!R!S#d!R!V#d!R!Z#d!R!f#d!R!m#d!R!n#d!R!o#d!R!v#d!R!x#d!R!z#d!R!|#d!R#O#d!R#S#d!R#U#d!R#X#d!R#Y#d!R#[#d!R#c#d!R#f#d!R#j#d!R#l#d!R#q#d!R#t#d!R#v#d!R%S#d!R%V#d!R%g#d!R%h#d!R%l#d!R%m#d!R&R#d!R&S#d!R&V#d!R&Y#d!R&`#d!R&c#d!R&e#d!R%U#d!R%Y#d!R~Om/iOt/wOy&ny~P'UOT)pO_)qO%i)rO%mVOe&oi~O#y,wO%U-_O%Y-_O~OT-iOf-gO!V-fO!Z-hO!f-bO!n-dO!o-dO%h-aO%mVO&R[O&S]O&V^O~Om/iOt/wOx$cq!U$cq~P'UOy-nOx$cq!U$cq~Om/iOt/wOy*RO!U&Py~P'UOx-oO!U&Py~Om/iOt-sO~P'UOq-OO!R%eO!S%dOe%vy!U%vy!Y%vyx%vy~P!,}O%mVO#^&jq%V&jq%Y&jqe&jq~Ox-wO#^&jq%V&jq%Y&jqe&jq~OT)pO_)qO%i)rO%mVO~Of-{O!d-yOx#zX#V#zX%b#zXe#zX~Oq#zXy#zX!U#zX!Y#zX~P$$nO%g-}O%h-}Oq#{Xx#{Xy#{X#V#{X%b#{X!U#{Xe#{X!Y#{X~O!f.PO~Ox.TO#V.VO%b.QOq&rXy&rX!U&rXe&rX~O_.YO~P$ WOf-{Oq&sXx&sXy&sX#V&sX%b&sX!U&sXe&sX!Y&sX~Oq.^Oy$nO~Om/iOt/wOx$cy!U$cy~P'UOm/iOt/wOy*RO!U&P!R~P'UOx.bO!U&P!R~Oe%yXq%yX!R%yX!S%yX!U%yX!Y%yXx%yX~P!,}Oq-OO!R%eO!S%dOe%xa!U%xa!Y%xax%xa~O%mVO#^&jy%V&jy%Y&jye&jy~O!d-yOf$Raq$Rax$Ray$Ra#V$Ra%b$Ra!U$Rae$Ra!Y$Ra~O!f.kO~O%g-}O%h-}Oq#{ax#{ay#{a#V#{a%b#{a!U#{ae#{a!Y#{a~O%b.QOq$Pax$Pay$Pa#V$Pa!U$Pae$Pa!Y$Pa~Oq&ray&ra!U&rae&ra~P#NzOx.pOq&ray&ra!U&rae&ra~O!U.sO~Oe.sO~Oy.uO~O!Y.vO~Om/iOt/wOy*RO!U&P!Z~P'UOy.yO~O%r.zO~P$$nOx.{O#V.VO%b.QOe&uX~Ox.{Oe&uX~Oe.}O~O!f/OO~O#V.VOq$}ax$}ay$}a%b$}a!U$}ae$}a!Y$}a~O#V.VO%b.QOq%Rax%Ray%Ra!U%Rae%Ra~Oq&riy&ri!U&rie&ri~P#NzOx/QO#V.VO%b.QO!Y&ta~Oy$Za~P$bOe&ua~P#NzOx/YOe&ua~O_/[O!Y&ti~P$ WOx/^O!Y&ti~Ox/^O#V.VO%b.QO!Y&ti~O#V.VO%b.QOe$Xix$Xi~O%r/aO~P$$nO#V.VO%b.QOe%Qax%Qa~Oe&ui~P#NzOy/dO~O_/[O!Y&tq~P$ WOx/fO!Y&tq~O#V.VO%b.QOx%Pi!Y%Pi~O_/[O~P$ WO_/[O!Y&ty~P$ WO#V.VO%b.QOe$Yix$Yi~O#V.VO%b.QOx%Pq!Y%Pq~Ox*xO#^%`a%V%`a%Y%`a%r%`a~P$bOT&TOm/iOt/wO~P'UOl/nO~Om/nO~P'UOy/oO~Or/pO~P!,}O&S&V&c&e&R!Z&Z&a&d&f&Y&`&Y%m~",goto:"!9p&vPPPP&wP'P*e*}+h,S,o-]P-zP'P.k.k'PPPP'P2PPPPPPP2P4oPP4oP6{7U=QPP=T=c=fPP'P'PPP=rPP'P'PPP'P'P'P'P'P=v>m'PP>pP>vByFcPFw'PPPPF{GR&wP&w&wP&wP&wP&wP&wP&w&w&wP&wPP&wPP&wPGXPG`GfPG`PG`G`PPPG`PIePInItIzIePG`JQPG`PJXJ_PJcJwKfLPJcJcLVLdJcJcJcJcLxMOMRMWMZMaMgMsNVN]NgNm! Z! a! g! m! w! 
}!!T!!Z!!a!!g!!y!#T!#Z!#a!#g!#q!#w!#}!$T!$Z!$e!$k!$u!${!%U!%[!%k!%s!%}!&UPPPPPPPPP!&[!&d!&m!&w!'SPPPPPPPPPPPP!+r!,[!0j!3vPP!4O!4^!4g!5]!5S!5f!5l!5o!5r!5u!5}!6nPPPPPPPPPP!6q!6tPPPPPPPPP!6z!7W!7d!7j!7s!7v!7|!8S!8Y!8]P!8e!8n!9j!9m]iOr#n$n)c+c'udOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/x}!dP#j#w$Y$h$t%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!P!eP#j#w$Y$h$t$v%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!R!fP#j#w$Y$h$t$v$w%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!T!gP#j#w$Y$h$t$v$w$x%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!V!hP#j#w$Y$h$t$v$w$x$y%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!X!iP#j#w$Y$h$t$v$w$x$y$z%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m!]!iP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/m'uSOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/x&ZUOXYZhrtv|}!R!S!T!X!j!l!p!q!r!t!u#^#k#n$Q$S$U$X$l$n%O%T%[%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.b.y/i/j/k/l/n/o/p/q/t/x%eWOXYZhrv|}!R!S!T!X!j!l#^#k#n$Q$S$U$X$l$n%O%T%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,S,U,W,Y,Z,],o,q,x,{-n-o.b/o/p/qQ#}uQ.c-sR/u/w'ldOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xW#ql!O!P$`W#yu&b-s/wQ$b!QQ$r!YQ$s!ZW$}!j'h*O+vS&a#z#{Q'R$mQ(l&ZQ(z&qU({&s(|(}U)O&u)P+RQ)n'[W)o'^+q,s-]S+p)p)qY,_*|,`-T-U-wQ,b+OQ,l+gQ,n+il-`,w-f-g-i.R.T.Y.p.u.z/P/[/a/dQ-v-SQ.Z-hQ.g-{Q.r.VU/V.{/Y/bX/]/Q/^/e/fR&`#yi!xXY!S!T%a%h'y(R)V*]*`*bR%_!wQ!|XQ%z#^Q&i$UR&l$XT-r-O.y![!kP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/mQ&^#rR'a$sR'g$}Q%W!nR.e-y'tcOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xS#hc#i!P-d,w-f-g-h-i-{.R.T.Y.p.u.z.{/P/Q/Y/[/^/a/b/d/e/f'tcOSXYZehrstvx|}!R!S!T!U!X![!d!e!f!g!h!i!j!l!p!q!r!t!u!{#O#S#T#^#k#n$P$Q$S$U$X$i$k$l$n$u%O%T%[%_%a%d%h%m%o%y&R&T&`&d&m&o&p&w&{'O'V'Y'g'h'k'm'n'r'w'y'}(R(W(X(_(b(i(k(s(v)S)V)Z)[)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*l*p*q*x*z*{+S+[+]+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.^.b.y/i/j/k/l/n/o/p/q/r/t/xT#hc#iS#__#`S#b`#cS#da#eS#fb#gT*t(e*uT(f%z(hQ$WwR+o)oX$Uw$V$W&kZkOr$n)c+cXoOr)c+cQ$o!WQ&y$fQ&z$gQ']$qQ'`$sQ)a'QQ)g'VQ)i'WQ)j'XQ)w'_Q)y'aQ+V)VQ+X)WQ+Y)XQ+^)_S+`)b)xQ+d)eQ+e)fQ+f)hQ,d+UQ,e+WQ,g+_Q,h+aQ,m+hQ-W,fQ-Y,kQ-Z,lQ-x-XQ._-lR.x.`WoOr)c+cR#tnQ'_$rR)b'RQ+n)oR,q+oQ)x'_R+a)bZmOnr)c+cQ'c$tR){'dT,u+u,vu-k,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dt-k,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dQ.Z-hX/]/Q/^/e/f!P-c,w-f-g-h-i-{.R.T.Y.p.u.z.{/P/Q/Y/[/^/a/b/d/e/fQ.O-bR.l.Pg.R-e.S.h.o.t/S/U/W/c/g/hu-j,w-f-g-i-{.R.T.Y.p.u.z.{/P/Y/[/a/b/dX-|-`-j.g/VR.i-{V/X.{/Y/bR.`-lQrOR#vrQ&c#|R(q&cS%n#R$OS(Y%n(]T(]%q&eQ%b!
zQ%i!}W'z%b%i(P(TQ(P%fR(T%kQ&n$YR(w&nQ(`%rQ*g(ZT*m(`*gQ'i%PR*P'iS'l%S%TY*T'l*U+|,|-pU*U'm'n'oU+|*V*W*XS,|+},OR-p,}Q#Y]R%u#YQ#]^R%w#]Q#`_R%{#`Q(c%xS*r(c*sR*s(dQ*u(eR,[*uQ#c`R%}#cQ#eaR&O#eQ#gbR&P#gQ#icR&Q#iQ#lfQ&S#jW&V#l&S(t*yQ(t&hR*y/mQ$VwS&j$V&kR&k$WQ&x$dR)T&xQ&[#qR(m&[Q$`!PR&r$`Q*}({S,a*}-VR-V,bQ&v$bR)Q&vQ#ojR&X#oQ+c)cR,i+cQ)U&yR+T)UQ&|$hS)]&|)^R)^&}Q'U$oR)d'UQ'Z$pS)m'Z+lR+l)nQ+r)sR,t+rWnOr)c+cR#snQ,v+uR-^,vd.S-e.h.o.t/S/U/W/c/g/hR.n.SU-z-`.g/VR.f-zQ/R.tS/_/R/`R/`/SS.|.h.iR/Z.|Q.U-eR.q.USqOrT+b)c+cWpOr)c+cR'S$nYjOr$n)c+cR&W#n[wOr#n$n)c+cR&i$U&YPOXYZhrtv|}!R!S!T!X!j!l!p!q!r!t!u#^#k#n$Q$S$U$X$l$n%O%T%[%_%a%h%m%o%y&R&`&d&o&p&w'O'V'Y'g'h'k'm'n'r'y(R(X(_(b(i(k(s)S)V)`)c)l)v*O*R*S*V*]*^*`*b*e*f*i*p*q*x*{+S+c+j+k+n+v+w+x+z+{,O,Q,S,U,W,Y,Z,],o,q,x,{-O-n-o.b.y/i/j/k/l/n/o/p/q/t/xQ!oSQ#jeQ#wsU$Yx%d'}S$h!U$kQ$t![Q$v!dQ$w!eQ$x!fQ$y!gQ$z!hQ${!iQ%f!{Q%k#OQ%q#SQ%r#TQ&e$PQ&}$iQ'd$uQ(j&TU(u&m(v*zW)Y&{)[+[+]Q*Z'wQ*d(WQ+Z)ZQ,V*lQ.w.^R/m/rQ!zXQ!}YQ$f!SQ$g!T^'v%a%h'y(R*]*`*bR+W)V[fOr#n$n)c+ch!wXY!S!T%a%h'y(R)V*]*`*bQ#RZQ#mhS$Ov|Q$]}W$d!R$X'O)`S$p!X$lW$|!j'h*O+vQ%S!lQ%x#^`&U#k&R(i(k(s*x,]/qQ&f$QQ&g$SQ&h$UQ'e%OQ'o%TQ'u%_W(V%m(X*e*iQ(Z%oQ(d%yQ(o&`S(r&d/oQ(x&oQ(y&pU)R&w)S+SQ)h'VY)k'Y)l+j+k,oQ)|'g^*Q'k*S+z+{,{-o.bQ*W'mQ*X'nS*Y'r/pW*k(_*f,S,WW*o(b*q,Y,ZQ+t)vQ+y*RQ+}*VQ,X*pQ,^*{Q,p+nQ,y+wQ,z+xQ,},OQ-R,UQ-[,qQ-m,xR.a-nhTOr#k#n$n&R&d'r(i(k)c+c$z!vXYZhv|}!R!S!T!X!j!l#^$Q$S$U$X$l%O%T%_%a%h%m%o%y&`&o&p&w'O'V'Y'g'h'k'm'n'y(R(X(_(b(s)S)V)`)l)v*O*R*S*V*]*`*b*e*f*i*p*q*x*{+S+j+k+n+v+w+x+z+{,O,S,U,W,Y,Z,],o,q,x,{-n-o.b/o/p/qQ#xtW%X!p!t/j/tQ%Y!qQ%Z!rQ%]!uQ%g/iS'q%[/nQ's/kQ't/lQ,P*^Q-Q,QS-q-O.yR/v/xU#|u-s/wR(p&b[gOr#n$n)c+cX!yX#^$U$XQ#WZQ$RvR$[|Q%c!zQ%j!}Q%p#RQ'e$|Q(Q%fQ(U%kQ(^%qQ(a%rQ*h(ZQ-P,PQ-u-QR.d-tQ$ZxQ'|%dR*_'}Q-t-OR/T.yR#QYR#VZR%R!jQ%P!jV)}'h*O+v!]!mP!o#j#w$Y$h$t$v$w$x$y$z${%f%k%q%r&e&}'d(j(u)Y*Z*d+Z,V.w/mR%U!lR%z#^Q(g%zR*w(hQ$e!RQ&l$XQ)_'OR+_)`Q#rlQ$^!OQ$a!PR&t$`Q(z&sR+Q(}Q(z&sQ+P(|R+Q(}R$c!QXpOr)c+cQ$j!UR'P$kQ$q!XR'Q$lR)u'^Q)s'^V,r+q,s-]Q-l,wQ.W-fR.X-gU-e,w-f-gQ.]-iQ.h-{Q.m.RU.o.T.p/PQ.t.YQ/S.uQ/U.zU/W.{/Y/bQ/c/[Q/g/aR/h/dR.[-hR.j-{",nodeNames:"⚠ print Comment Script AssignStatement * BinaryExpression BitOp BitOp BitOp BitOp ArithOp ArithOp @ ArithOp ** UnaryExpression ArithOp BitOp AwaitExpression await ) ( ParenthesizedExpression BinaryExpression or and CompareOp in not is UnaryExpression ConditionalExpression if else LambdaExpression lambda ParamList VariableName AssignOp , : NamedExpression AssignOp YieldExpression yield from TupleExpression ComprehensionExpression async for LambdaExpression ] [ ArrayExpression ArrayComprehensionExpression } { DictionaryExpression DictionaryComprehensionExpression SetExpression SetComprehensionExpression CallExpression ArgList AssignOp MemberExpression . 
PropertyName Number String FormatString FormatReplacement FormatConversion FormatSpec ContinuedString Ellipsis None Boolean TypeDef AssignOp UpdateStatement UpdateOp ExpressionStatement DeleteStatement del PassStatement pass BreakStatement break ContinueStatement continue ReturnStatement return YieldStatement PrintStatement RaiseStatement raise ImportStatement import as ScopeStatement global nonlocal AssertStatement assert StatementGroup ; IfStatement Body elif WhileStatement while ForStatement TryStatement try except finally WithStatement with FunctionDefinition def ParamList AssignOp TypeDef ClassDefinition class DecoratedStatement Decorator At MatchStatement match MatchBody MatchClause case CapturePattern LiteralPattern ArithOp ArithOp AsPattern OrPattern LogicOp AttributePattern SequencePattern MappingPattern StarPattern ClassPattern PatternArgList KeywordPattern KeywordPattern Guard",maxTerm:267,context:PO,nodeProps:[["group",-14,4,80,82,83,85,87,89,91,93,94,95,97,100,103,"Statement Statement",-22,6,16,19,23,38,47,48,54,55,58,59,60,61,62,65,68,69,70,74,75,76,77,"Expression",-10,105,107,110,112,113,117,119,124,126,129,"Statement",-9,134,135,138,139,141,142,143,144,145,"Pattern"],["openedBy",21,"(",52,"[",56,"{"],["closedBy",22,")",53,"]",57,"}"]],propSources:[sO],skippedNodes:[0,2],repeatNodeCount:38,tokenData:"&JdMgR!^OX$}XY!&]Y[$}[]!&]]p$}pq!&]qr!(grs!,^st!IYtu$}uv$5[vw$7nwx$8zxy%'vyz%(|z{%*S{|%,r|}%.O}!O%/U!O!P%1k!P!Q%<q!Q!R%?a!R![%Cc![!]%N_!]!^&!q!^!_&#w!_!`&&g!`!a&'s!a!b$}!b!c&*`!c!d&+n!d!e&-`!e!h&+n!h!i&7[!i!t&+n!t!u&@j!u!w&+n!w!x&5j!x!}&+n!}#O&Bt#O#P!'u#P#Q&Cz#Q#R&EQ#R#S&+n#S#T$}#T#U&+n#U#V&-`#V#Y&+n#Y#Z&7[#Z#f&+n#f#g&@j#g#i&+n#i#j&5j#j#o&+n#o#p&F^#p#q&GS#q#r&H`#r#s&I^#s$g$}$g~&+n<r%`Z&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<Q&^Z&^7[&TS&Z`&d!bOr'PrsFisw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'P<Q'`Z&^7[&TS&WW&Z`&d!b&f#tOr'Prs&Rsw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'P;p([Z&^7[&WW&f#tOr(}rs)}sw(}wx={x#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(};p)[Z&^7[&TS&WW&d!b&f#tOr(}rs)}sw(}wx(Rx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(};p*WZ&^7[&TS&d!bOr(}rs*ysw(}wx(Rx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(};p+SZ&^7[&TS&d!bOr(}rs+usw(}wx(Rx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}8r,OX&^7[&TS&d!bOw+uwx,kx#O+u#O#P.]#P#o+u#o#p0d#p#q+u#q#r.q#r~+u8r,pX&^7[Ow+uwx-]x#O+u#O#P.]#P#o+u#o#p0d#p#q+u#q#r.q#r~+u8r-bX&^7[Ow+uwx-}x#O+u#O#P.]#P#o+u#o#p0d#p#q+u#q#r.q#r~+u7[.SR&^7[O#o-}#p#q-}#r~-}8r.bT&^7[O#o+u#o#p.q#p#q+u#q#r.q#r~+u!f.xV&TS&d!bOw.qwx/_x#O.q#O#P0^#P#o.q#o#p0d#p~.q!f/bVOw.qwx/wx#O.q#O#P0^#P#o.q#o#p0d#p~.q!f/zUOw.qx#O.q#O#P0^#P#o.q#o#p0d#p~.q!f0aPO~.q!f0iV&TSOw1Owx1dx#O1O#O#P2V#P#o1O#o#p.q#p~1OS1TT&TSOw1Owx1dx#O1O#O#P2V#P~1OS1gTOw1Owx1vx#O1O#O#P2V#P~1OS1ySOw1Ox#O1O#O#P2V#P~1OS2YPO~1O;p2bT&^7[O#o(}#o#p2q#p#q(}#q#r2q#r~(}%d2|X&TS&WW&d!b&f#tOr2qrs3isw2qwx5Px#O2q#O#P:R#P#o2q#o#p:X#p~2q%d3pX&TS&d!bOr2qrs4]sw2qwx5Px#O2q#O#P:R#P#o2q#o#p:X#p~2q%d4dX&TS&d!bOr2qrs.qsw2qwx5Px#O2q#O#P:R#P#o2q#o#p:X#p~2q%d5WX&WW&f#tOr2qrs3isw2qwx5sx#O2q#O#P:R#P#o2q#o#p:X#p~2q%d5zX&WW&f#tOr2qrs3isw2qwx6gx#O2q#O#P:R#P#o2q#o#p:X#p~2q#|6nV&WW&f#tOr6grs7Ts#O6g#O#P8S#P#o6g#o#p8Y#p~6g#|7WVOr6grs7ms#O6g#O#P8S#P#o6g#o#p8Y#p~6g#|7pUOr6gs#O6g#O#P8S#P#o6g#o#p8Y#p~6g#|8VPO~6g#|8_V&WWOr8trs9Ys#O8t#O#P9{#P#o8t#o#p6g#p~8tW8yT&WWOr8trs9Ys#O8t#O#P9{#P~8tW9]TOr8trs9ls#O8t#O#P9{#P~8tW9oSOr8ts#O8t#O#P9{#P~8tW:OPO~8t%d:UPO~2q%d:`X&TS&WWOr:{rs;isw:{wx<ox#O:{#O#P=u#P#o:{#o#p2q#p~:{[;SV&TS&WWOr:{rs;isw:{wx<ox#O:{#O#P=u#P~:{[;nV&TSOr:{rs<Tsw:{wx<ox#O:{#O#P=u#P~:{[<YV&TSOr:{rs1Osw:{wx<ox#O:{#O#P=u#P~:{[<tV&WW
Or:{rs;isw:{wx=Zx#O:{#O#P=u#P~:{[=`V&WWOr:{rs;isw:{wx8tx#O:{#O#P=u#P~:{[=xPO~:{;p>UZ&^7[&WW&f#tOr(}rs)}sw(}wx>wx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}:Y?QX&^7[&WW&f#tOr>wrs?ms#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:Y?rX&^7[Or>wrs@_s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:Y@dX&^7[Or>wrs-}s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>w:YAUT&^7[O#o>w#o#p6g#p#q>w#q#r6g#r~>w<QAjT&^7[O#o'P#o#pAy#p#q'P#q#rAy#r~'P%tBWX&TS&WW&Z`&d!b&f#tOrAyrsBsswAywx5Px#OAy#O#PEo#P#oAy#o#pEu#p~Ay%tB|X&TS&Z`&d!bOrAyrsCiswAywx5Px#OAy#O#PEo#P#oAy#o#pEu#p~Ay%tCrX&TS&Z`&d!bOrAyrsD_swAywx5Px#OAy#O#PEo#P#oAy#o#pEu#p~Ay!vDhV&TS&Z`&d!bOwD_wx/_x#OD_#O#PD}#P#oD_#o#pET#p~D_!vEQPO~D_!vEYV&TSOw1Owx1dx#O1O#O#P2V#P#o1O#o#pD_#p~1O%tErPO~Ay%tE|X&TS&WWOr:{rs;isw:{wx<ox#O:{#O#P=u#P#o:{#o#pAy#p~:{<QFtZ&^7[&TS&Z`&d!bOr'PrsGgsw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'P9SGrX&^7[&TS&Z`&d!bOwGgwx,kx#OGg#O#PH_#P#oGg#o#pET#p#qGg#q#rD_#r~Gg9SHdT&^7[O#oGg#o#pD_#p#qGg#q#rD_#r~Gg<bIOZ&^7[&WW&ap&f#tOrIqrs)}swIqwx! wx#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~Iq<bJQZ&^7[&TS&WW&ap&d!b&f#tOrIqrs)}swIqwxHsx#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~Iq<bJxT&^7[O#oIq#o#pKX#p#qIq#q#rKX#r~Iq&UKfX&TS&WW&ap&d!b&f#tOrKXrs3iswKXwxLRx#OKX#O#PN}#P#oKX#o#p! T#p~KX&UL[X&WW&ap&f#tOrKXrs3iswKXwxLwx#OKX#O#PN}#P#oKX#o#p! T#p~KX&UMQX&WW&ap&f#tOrKXrs3iswKXwxMmx#OKX#O#PN}#P#oKX#o#p! T#p~KX$nMvV&WW&ap&f#tOrMmrs7Ts#OMm#O#PN]#P#oMm#o#pNc#p~Mm$nN`PO~Mm$nNhV&WWOr8trs9Ys#O8t#O#P9{#P#o8t#o#pMm#p~8t&U! QPO~KX&U! [X&TS&WWOr:{rs;isw:{wx<ox#O:{#O#P=u#P#o:{#o#pKX#p~:{<b!!SZ&^7[&WW&ap&f#tOrIqrs)}swIqwx!!ux#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~Iq:z!#QX&^7[&WW&ap&f#tOr!!urs?ms#O!!u#O#P!#m#P#o!!u#o#pNc#p#q!!u#q#rMm#r~!!u:z!#rT&^7[O#o!!u#o#pMm#p#q!!u#q#rMm#r~!!u<r!$WT&^7[O#o$}#o#p!$g#p#q$}#q#r!$g#r~$}&f!$vX&TS&WW&Z`&ap&d!b&f#tOr!$grsBssw!$gwxLRx#O!$g#O#P!%c#P#o!$g#o#p!%i#p~!$g&f!%fPO~!$g&f!%pX&TS&WWOr:{rs;isw:{wx<ox#O:{#O#P=u#P#o:{#o#p!$g#p~:{Mg!&pa&^7[&TS&WW%[1s&Z`&ap&d!b&f#tOX$}XY!&]Y[$}[]!&]]p$}pq!&]qr$}rs&Rsw$}wxHsx#O$}#O#P!'u#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Mg!'zX&^7[OY$}YZ!&]Z]$}]^!&]^#o$}#o#p!$g#p#q$}#q#r!$g#r~$}<u!(xb&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`!*Q!`#O$}#O#P!$R#P#T$}#T#U!+W#U#f$}#f#g!+W#g#h!+W#h#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u!*eZkR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u!+kZ!jR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{!,m_&bp&^7[&TS&R,X&Z`&d!bOY!-lYZ'PZ]!-l]^'P^r!-lrs!G^sw!-lwx!/|x#O!-l#O#P!Cp#P#o!-l#o#p!F[#p#q!-l#q#r!DU#r~!-lGZ!-}_&^7[&TS&WW&R,X&Z`&d!b&f#tOY!-lYZ'PZ]!-l]^'P^r!-lrs!.|sw!-lwx!/|x#O!-l#O#P!Cp#P#o!-l#o#p!F[#p#q!-l#q#r!DU#r~!-lGZ!/ZZ&^7[&TS&R,X&Z`&d!bOr'PrsFisw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PFy!0X_&^7[&WW&R,X&f#tOY!1WYZ(}Z]!1W]^(}^r!1Wrs!2fsw!1Wwx!@Yx#O!1W#O#P!3d#P#o!1W#o#p!;t#p#q!1W#q#r!3x#r~!1WFy!1g_&^7[&TS&WW&R,X&d!b&f#tOY!1WYZ(}Z]!1W]^(}^r!1Wrs!2fsw!1Wwx!/|x#O!1W#O#P!3d#P#o!1W#o#p!;t#p#q!1W#q#r!3x#r~!1WFy!2qZ&^7[&TS&R,X&d!bOr(}rs*ysw(}wx(Rx#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}Fy!3iT&^7[O#o!1W#o#p!3x#p#q!1W#q#r!3x#r~!1W0m!4V]&TS&WW&R,X&d!b&f#tOY!3xYZ2qZ]!3x]^2q^r!3xrs!5Osw!3xwx!5tx#O!3x#O#P!;n#P#o!3x#o#p!;t#p~!3x0m!5XX&TS&R,X&d!bOr2qrs4]sw2qwx5Px#O2q#O#P:R#P#o2q#o#p:X#p~2q0m!5}]&WW&R,X&f#tOY!3xYZ2qZ]!3x]^2q^r!3xrs!5Osw!3xwx!6vx#O!3x#O#P!;n#P#o!3x#o#p!;t#p~!3x0m!7P]&WW&R,X&f#tOY!3xYZ2qZ]!3x]^2q^r!3xrs!5Osw!3xwx!7xx#O!3x#O#P!;n#P#o!3x#o#p!;t#p~!3x/V!8RZ&WW&R,X&f#tOY!7xYZ6gZ]!7x]^6g^r!7xrs!8ts#O!7x#O#P!9`#P#o!7x#o#p!9f#p~!7x/V!8yV&R,XOr6grs7ms#O6g#O#P8S#P#o6g#o#p8Y#p~6g/V!9cPO~!7x/V!9mZ&WW&R,XOY!:`YZ8tZ]!:`]^8t^r!:`rs!;Ss#O!:`#O#P!;h#P#o!:`#o#p!7x#p~!:`,a!:g
X&WW&R,XOY!:`YZ8tZ]!:`]^8t^r!:`rs!;Ss#O!:`#O#P!;h#P~!:`,a!;XT&R,XOr8trs9ls#O8t#O#P9{#P~8t,a!;kPO~!:`0m!;qPO~!3x0m!;}]&TS&WW&R,XOY!<vYZ:{Z]!<v]^:{^r!<vrs!=rsw!<vwx!>`x#O!<v#O#P!@S#P#o!<v#o#p!3x#p~!<v,e!=PZ&TS&WW&R,XOY!<vYZ:{Z]!<v]^:{^r!<vrs!=rsw!<vwx!>`x#O!<v#O#P!@S#P~!<v,e!=yV&TS&R,XOr:{rs<Tsw:{wx<ox#O:{#O#P=u#P~:{,e!>gZ&WW&R,XOY!<vYZ:{Z]!<v]^:{^r!<vrs!=rsw!<vwx!?Yx#O!<v#O#P!@S#P~!<v,e!?aZ&WW&R,XOY!<vYZ:{Z]!<v]^:{^r!<vrs!=rsw!<vwx!:`x#O!<v#O#P!@S#P~!<v,e!@VPO~!<vFy!@e_&^7[&WW&R,X&f#tOY!1WYZ(}Z]!1W]^(}^r!1Wrs!2fsw!1Wwx!Adx#O!1W#O#P!3d#P#o!1W#o#p!;t#p#q!1W#q#r!3x#r~!1WEc!Ao]&^7[&WW&R,X&f#tOY!AdYZ>wZ]!Ad]^>w^r!Adrs!Bhs#O!Ad#O#P!C[#P#o!Ad#o#p!9f#p#q!Ad#q#r!7x#r~!AdEc!BoX&^7[&R,XOr>wrs@_s#O>w#O#PAP#P#o>w#o#p8Y#p#q>w#q#r6g#r~>wEc!CaT&^7[O#o!Ad#o#p!7x#p#q!Ad#q#r!7x#r~!AdGZ!CuT&^7[O#o!-l#o#p!DU#p#q!-l#q#r!DU#r~!-l0}!De]&TS&WW&R,X&Z`&d!b&f#tOY!DUYZAyZ]!DU]^Ay^r!DUrs!E^sw!DUwx!5tx#O!DU#O#P!FU#P#o!DU#o#p!F[#p~!DU0}!EiX&TS&R,X&Z`&d!bOrAyrsCiswAywx5Px#OAy#O#PEo#P#oAy#o#pEu#p~Ay0}!FXPO~!DU0}!Fe]&TS&WW&R,XOY!<vYZ:{Z]!<v]^:{^r!<vrs!=rsw!<vwx!>`x#O!<v#O#P!@S#P#o!<v#o#p!DU#p~!<vGZ!GkZ&^7[&TS&R,X&Z`&d!bOr'Prs!H^sw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PGZ!HmX&X#|&^7[&TS&V,X&Z`&d!bOwGgwx,kx#OGg#O#PH_#P#oGg#o#pET#p#qGg#q#rD_#r~GgMg!Im_Q1s&^7[&TS&WW&Z`&ap&d!b&f#tOY!IYYZ$}Z]!IY]^$}^r!IYrs!Jlsw!IYwx$$[x#O!IY#O#P$1v#P#o!IY#o#p$4Y#p#q!IY#q#r$2j#r~!IYLu!Jy_Q1s&^7[&TS&Z`&d!bOY!KxYZ'PZ]!Kx]^'P^r!Kxrs$ Usw!Kxwx!MYx#O!Kx#O#P#G^#P#o!Kx#o#p#NS#p#q!Kx#q#r#HQ#r~!KxLu!LZ_Q1s&^7[&TS&WW&Z`&d!b&f#tOY!KxYZ'PZ]!Kx]^'P^r!Kxrs!Jlsw!Kxwx!MYx#O!Kx#O#P#G^#P#o!Kx#o#p#NS#p#q!Kx#q#r#HQ#r~!KxLe!Me_Q1s&^7[&WW&f#tOY!NdYZ(}Z]!Nd]^(}^r!Ndrs# rsw!Ndwx#B[x#O!Nd#O#P#/f#P#o!Nd#o#p#<b#p#q!Nd#q#r#0Y#r~!NdLe!Ns_Q1s&^7[&TS&WW&d!b&f#tOY!NdYZ(}Z]!Nd]^(}^r!Ndrs# rsw!Ndwx!MYx#O!Nd#O#P#/f#P#o!Nd#o#p#<b#p#q!Nd#q#r#0Y#r~!NdLe# 
}_Q1s&^7[&TS&d!bOY!NdYZ(}Z]!Nd]^(}^r!Ndrs#!|sw!Ndwx!MYx#O!Nd#O#P#/f#P#o!Nd#o#p#<b#p#q!Nd#q#r#0Y#r~!NdLe##X_Q1s&^7[&TS&d!bOY!NdYZ(}Z]!Nd]^(}^r!Ndrs#$Wsw!Ndwx!MYx#O!Nd#O#P#/f#P#o!Nd#o#p#<b#p#q!Nd#q#r#0Y#r~!NdIg#$c]Q1s&^7[&TS&d!bOY#$WYZ+uZ]#$W]^+u^w#$Wwx#%[x#O#$W#O#P#(^#P#o#$W#o#p#,Q#p#q#$W#q#r#)Q#r~#$WIg#%c]Q1s&^7[OY#$WYZ+uZ]#$W]^+u^w#$Wwx#&[x#O#$W#O#P#(^#P#o#$W#o#p#,Q#p#q#$W#q#r#)Q#r~#$WIg#&c]Q1s&^7[OY#$WYZ+uZ]#$W]^+u^w#$Wwx#'[x#O#$W#O#P#(^#P#o#$W#o#p#,Q#p#q#$W#q#r#)Q#r~#$WHP#'cXQ1s&^7[OY#'[YZ-}Z]#'[]^-}^#o#'[#o#p#(O#p#q#'[#q#r#(O#r~#'[1s#(TRQ1sOY#(OZ]#(O^~#(OIg#(eXQ1s&^7[OY#$WYZ+uZ]#$W]^+u^#o#$W#o#p#)Q#p#q#$W#q#r#)Q#r~#$W3Z#)ZZQ1s&TS&d!bOY#)QYZ.qZ]#)Q]^.q^w#)Qwx#)|x#O#)Q#O#P#+l#P#o#)Q#o#p#,Q#p~#)Q3Z#*RZQ1sOY#)QYZ.qZ]#)Q]^.q^w#)Qwx#*tx#O#)Q#O#P#+l#P#o#)Q#o#p#,Q#p~#)Q3Z#*yZQ1sOY#)QYZ.qZ]#)Q]^.q^w#)Qwx#(Ox#O#)Q#O#P#+l#P#o#)Q#o#p#,Q#p~#)Q3Z#+qTQ1sOY#)QYZ.qZ]#)Q]^.q^~#)Q3Z#,XZQ1s&TSOY#,zYZ1OZ]#,z]^1O^w#,zwx#-nx#O#,z#O#P#/Q#P#o#,z#o#p#)Q#p~#,z1w#-RXQ1s&TSOY#,zYZ1OZ]#,z]^1O^w#,zwx#-nx#O#,z#O#P#/Q#P~#,z1w#-sXQ1sOY#,zYZ1OZ]#,z]^1O^w#,zwx#.`x#O#,z#O#P#/Q#P~#,z1w#.eXQ1sOY#,zYZ1OZ]#,z]^1O^w#,zwx#(Ox#O#,z#O#P#/Q#P~#,z1w#/VTQ1sOY#,zYZ1OZ]#,z]^1O^~#,zLe#/mXQ1s&^7[OY!NdYZ(}Z]!Nd]^(}^#o!Nd#o#p#0Y#p#q!Nd#q#r#0Y#r~!Nd6X#0g]Q1s&TS&WW&d!b&f#tOY#0YYZ2qZ]#0Y]^2q^r#0Yrs#1`sw#0Ywx#3dx#O#0Y#O#P#;|#P#o#0Y#o#p#<b#p~#0Y6X#1i]Q1s&TS&d!bOY#0YYZ2qZ]#0Y]^2q^r#0Yrs#2bsw#0Ywx#3dx#O#0Y#O#P#;|#P#o#0Y#o#p#<b#p~#0Y6X#2k]Q1s&TS&d!bOY#0YYZ2qZ]#0Y]^2q^r#0Yrs#)Qsw#0Ywx#3dx#O#0Y#O#P#;|#P#o#0Y#o#p#<b#p~#0Y6X#3m]Q1s&WW&f#tOY#0YYZ2qZ]#0Y]^2q^r#0Yrs#1`sw#0Ywx#4fx#O#0Y#O#P#;|#P#o#0Y#o#p#<b#p~#0Y6X#4o]Q1s&WW&f#tOY#0YYZ2qZ]#0Y]^2q^r#0Yrs#1`sw#0Ywx#5hx#O#0Y#O#P#;|#P#o#0Y#o#p#<b#p~#0Y4q#5qZQ1s&WW&f#tOY#5hYZ6gZ]#5h]^6g^r#5hrs#6ds#O#5h#O#P#8S#P#o#5h#o#p#8h#p~#5h4q#6iZQ1sOY#5hYZ6gZ]#5h]^6g^r#5hrs#7[s#O#5h#O#P#8S#P#o#5h#o#p#8h#p~#5h4q#7aZQ1sOY#5hYZ6gZ]#5h]^6g^r#5hrs#(Os#O#5h#O#P#8S#P#o#5h#o#p#8h#p~#5h4q#8XTQ1sOY#5hYZ6gZ]#5h]^6g^~#5h4q#8oZQ1s&WWOY#9bYZ8tZ]#9b]^8t^r#9brs#:Us#O#9b#O#P#;h#P#o#9b#o#p#5h#p~#9b1{#9iXQ1s&WWOY#9bYZ8tZ]#9b]^8t^r#9brs#:Us#O#9b#O#P#;h#P~#9b1{#:ZXQ1sOY#9bYZ8tZ]#9b]^8t^r#9brs#:vs#O#9b#O#P#;h#P~#9b1{#:{XQ1sOY#9bYZ8tZ]#9b]^8t^r#9brs#(Os#O#9b#O#P#;h#P~#9b1{#;mTQ1sOY#9bYZ8tZ]#9b]^8t^~#9b6X#<RTQ1sOY#0YYZ2qZ]#0Y]^2q^~#0Y6X#<k]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p#0Y#p~#=d2P#=mZQ1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#>gZQ1s&TSOY#=dYZ:{Z]#=d]^:{^r#=drs#?Ysw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#?aZQ1s&TSOY#=dYZ:{Z]#=d]^:{^r#=drs#,zsw#=dwx#@Sx#O#=d#O#P#Av#P~#=d2P#@ZZQ1s&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@|x#O#=d#O#P#Av#P~#=d2P#ATZQ1s&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#9bx#O#=d#O#P#Av#P~#=d2P#A{TQ1sOY#=dYZ:{Z]#=d]^:{^~#=dLe#Bg_Q1s&^7[&WW&f#tOY!NdYZ(}Z]!Nd]^(}^r!Ndrs# 
rsw!Ndwx#Cfx#O!Nd#O#P#/f#P#o!Nd#o#p#<b#p#q!Nd#q#r#0Y#r~!NdJ}#Cq]Q1s&^7[&WW&f#tOY#CfYZ>wZ]#Cf]^>w^r#Cfrs#Djs#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#Dq]Q1s&^7[OY#CfYZ>wZ]#Cf]^>w^r#Cfrs#Ejs#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#Eq]Q1s&^7[OY#CfYZ>wZ]#Cf]^>w^r#Cfrs#'[s#O#Cf#O#P#Fj#P#o#Cf#o#p#8h#p#q#Cf#q#r#5h#r~#CfJ}#FqXQ1s&^7[OY#CfYZ>wZ]#Cf]^>w^#o#Cf#o#p#5h#p#q#Cf#q#r#5h#r~#CfLu#GeXQ1s&^7[OY!KxYZ'PZ]!Kx]^'P^#o!Kx#o#p#HQ#p#q!Kx#q#r#HQ#r~!Kx6i#Ha]Q1s&TS&WW&Z`&d!b&f#tOY#HQYZAyZ]#HQ]^Ay^r#HQrs#IYsw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ6i#Ie]Q1s&TS&Z`&d!bOY#HQYZAyZ]#HQ]^Ay^r#HQrs#J^sw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ6i#Ji]Q1s&TS&Z`&d!bOY#HQYZAyZ]#HQ]^Ay^r#HQrs#Kbsw#HQwx#3dx#O#HQ#O#P#Mn#P#o#HQ#o#p#NS#p~#HQ3k#KmZQ1s&TS&Z`&d!bOY#KbYZD_Z]#Kb]^D_^w#Kbwx#)|x#O#Kb#O#P#L`#P#o#Kb#o#p#Lt#p~#Kb3k#LeTQ1sOY#KbYZD_Z]#Kb]^D_^~#Kb3k#L{ZQ1s&TSOY#,zYZ1OZ]#,z]^1O^w#,zwx#-nx#O#,z#O#P#/Q#P#o#,z#o#p#Kb#p~#,z6i#MsTQ1sOY#HQYZAyZ]#HQ]^Ay^~#HQ6i#N]]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p#HQ#p~#=dLu$ c_Q1s&^7[&TS&Z`&d!bOY!KxYZ'PZ]!Kx]^'P^r!Kxrs$!bsw!Kxwx!MYx#O!Kx#O#P#G^#P#o!Kx#o#p#NS#p#q!Kx#q#r#HQ#r~!KxIw$!o]Q1s&^7[&TS&Z`&d!bOY$!bYZGgZ]$!b]^Gg^w$!bwx#%[x#O$!b#O#P$#h#P#o$!b#o#p#Lt#p#q$!b#q#r#Kb#r~$!bIw$#oXQ1s&^7[OY$!bYZGgZ]$!b]^Gg^#o$!b#o#p#Kb#p#q$!b#q#r#Kb#r~$!bMV$$i_Q1s&^7[&WW&ap&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$.px#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hMV$%y_Q1s&^7[&TS&WW&ap&d!b&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$$[x#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hMV$'PXQ1s&^7[OY$%hYZIqZ]$%h]^Iq^#o$%h#o#p$'l#p#q$%h#q#r$'l#r~$%h6y$'{]Q1s&TS&WW&ap&d!b&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$(tx#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l6y$)P]Q1s&WW&ap&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$)xx#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l6y$*T]Q1s&WW&ap&f#tOY$'lYZKXZ]$'l]^KX^r$'lrs#1`sw$'lwx$*|x#O$'l#O#P$-Y#P#o$'l#o#p$-n#p~$'l5c$+XZQ1s&WW&ap&f#tOY$*|YZMmZ]$*|]^Mm^r$*|rs#6ds#O$*|#O#P$+z#P#o$*|#o#p$,`#p~$*|5c$,PTQ1sOY$*|YZMmZ]$*|]^Mm^~$*|5c$,gZQ1s&WWOY#9bYZ8tZ]#9b]^8t^r#9brs#:Us#O#9b#O#P#;h#P#o#9b#o#p$*|#p~#9b6y$-_TQ1sOY$'lYZKXZ]$'l]^KX^~$'l6y$-w]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p$'l#p~#=dMV$.}_Q1s&^7[&WW&ap&f#tOY$%hYZIqZ]$%h]^Iq^r$%hrs# rsw$%hwx$/|x#O$%h#O#P$&x#P#o$%h#o#p$-n#p#q$%h#q#r$'l#r~$%hKo$0Z]Q1s&^7[&WW&ap&f#tOY$/|YZ!!uZ]$/|]^!!u^r$/|rs#Djs#O$/|#O#P$1S#P#o$/|#o#p$,`#p#q$/|#q#r$*|#r~$/|Ko$1ZXQ1s&^7[OY$/|YZ!!uZ]$/|]^!!u^#o$/|#o#p$*|#p#q$/|#q#r$*|#r~$/|Mg$1}XQ1s&^7[OY!IYYZ$}Z]!IY]^$}^#o!IY#o#p$2j#p#q!IY#q#r$2j#r~!IY7Z$2{]Q1s&TS&WW&Z`&ap&d!b&f#tOY$2jYZ!$gZ]$2j]^!$g^r$2jrs#IYsw$2jwx$(tx#O$2j#O#P$3t#P#o$2j#o#p$4Y#p~$2j7Z$3yTQ1sOY$2jYZ!$gZ]$2j]^!$g^~$2j7Z$4c]Q1s&TS&WWOY#=dYZ:{Z]#=d]^:{^r#=drs#>`sw#=dwx#@Sx#O#=d#O#P#Av#P#o#=d#o#p$2j#p~#=dGz$5o]%jQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz$6{Z!s,W&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz$8R]%dQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{$9Z_&_`&^7[&WW&R,X&ap&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx%%zx#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YGk$:k_&^7[&TS&WW&R,X&ap&d!b&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx% 
^x#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YFy$;u_&^7[&TS&R,X&d!bOY$<tYZ(}Z]$<t]^(}^r$<trs$Kvsw$<twx$>Sx#O$<t#O#P$?Q#P#o$<t#o#p$Gb#p#q$<t#q#r$?f#r~$<tFy$=T_&^7[&TS&WW&R,X&d!b&f#tOY$<tYZ(}Z]$<t]^(}^r$<trs$;jsw$<twx$>Sx#O$<t#O#P$?Q#P#o$<t#o#p$Gb#p#q$<t#q#r$?f#r~$<tFy$>_Z&^7[&WW&R,X&f#tOr(}rs)}sw(}wx={x#O(}#O#P2]#P#o(}#o#p:X#p#q(}#q#r2q#r~(}Fy$?VT&^7[O#o$<t#o#p$?f#p#q$<t#q#r$?f#r~$<t0m$?s]&TS&WW&R,X&d!b&f#tOY$?fYZ2qZ]$?f]^2q^r$?frs$@lsw$?fwx$Ffx#O$?f#O#P$G[#P#o$?f#o#p$Gb#p~$?f0m$@u]&TS&R,X&d!bOY$?fYZ2qZ]$?f]^2q^r$?frs$Answ$?fwx$Ffx#O$?f#O#P$G[#P#o$?f#o#p$Gb#p~$?f0m$Aw]&TS&R,X&d!bOY$?fYZ2qZ]$?f]^2q^r$?frs$Bpsw$?fwx$Ffx#O$?f#O#P$G[#P#o$?f#o#p$Gb#p~$?f-o$ByZ&TS&R,X&d!bOY$BpYZ.qZ]$Bp]^.q^w$Bpwx$Clx#O$Bp#O#P$DW#P#o$Bp#o#p$D^#p~$Bp-o$CqV&R,XOw.qwx/wx#O.q#O#P0^#P#o.q#o#p0d#p~.q-o$DZPO~$Bp-o$DeZ&TS&R,XOY$EWYZ1OZ]$EW]^1O^w$EWwx$Ezx#O$EW#O#P$F`#P#o$EW#o#p$Bp#p~$EW,]$E_X&TS&R,XOY$EWYZ1OZ]$EW]^1O^w$EWwx$Ezx#O$EW#O#P$F`#P~$EW,]$FPT&R,XOw1Owx1vx#O1O#O#P2V#P~1O,]$FcPO~$EW0m$FoX&WW&R,X&f#tOr2qrs3isw2qwx5sx#O2q#O#P:R#P#o2q#o#p:X#p~2q0m$G_PO~$?f0m$Gk]&TS&WW&R,XOY$HdYZ:{Z]$Hd]^:{^r$Hdrs$I`sw$Hdwx$KSx#O$Hd#O#P$Kp#P#o$Hd#o#p$?f#p~$Hd,e$HmZ&TS&WW&R,XOY$HdYZ:{Z]$Hd]^:{^r$Hdrs$I`sw$Hdwx$KSx#O$Hd#O#P$Kp#P~$Hd,e$IgZ&TS&R,XOY$HdYZ:{Z]$Hd]^:{^r$Hdrs$JYsw$Hdwx$KSx#O$Hd#O#P$Kp#P~$Hd,e$JaZ&TS&R,XOY$HdYZ:{Z]$Hd]^:{^r$Hdrs$EWsw$Hdwx$KSx#O$Hd#O#P$Kp#P~$Hd,e$KZV&WW&R,XOr:{rs;isw:{wx=Zx#O:{#O#P=u#P~:{,e$KsPO~$HdFy$LR_&^7[&TS&R,X&d!bOY$<tYZ(}Z]$<t]^(}^r$<trs$MQsw$<twx$>Sx#O$<t#O#P$?Q#P#o$<t#o#p$Gb#p#q$<t#q#r$?f#r~$<tC{$M]]&^7[&TS&R,X&d!bOY$MQYZ+uZ]$MQ]^+u^w$MQwx$NUx#O$MQ#O#P$Nx#P#o$MQ#o#p$D^#p#q$MQ#q#r$Bp#r~$MQC{$N]X&^7[&R,XOw+uwx-]x#O+u#O#P.]#P#o+u#o#p0d#p#q+u#q#r.q#r~+uC{$N}T&^7[O#o$MQ#o#p$Bp#p#q$MQ#q#r$Bp#r~$MQGk% kZ&^7[&WW&R,X&ap&f#tOrIqrs)}swIqwx! wx#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~IqGk%!cT&^7[O#o$:Y#o#p%!r#p#q$:Y#q#r%!r#r~$:Y1_%#R]&TS&WW&R,X&ap&d!b&f#tOY%!rYZKXZ]%!r]^KX^r%!rrs$@lsw%!rwx%#zx#O%!r#O#P%$r#P#o%!r#o#p%$x#p~%!r1_%$VX&WW&R,X&ap&f#tOrKXrs3iswKXwxLwx#OKX#O#PN}#P#oKX#o#p! T#p~KX1_%$uPO~%!r1_%%R]&TS&WW&R,XOY$HdYZ:{Z]$Hd]^:{^r$Hdrs$I`sw$Hdwx$KSx#O$Hd#O#P$Kp#P#o$Hd#o#p%!r#p~$HdGk%&XZ&^7[&WW&R,X&ap&f#tOrIqrs)}swIqwx%&zx#OIq#O#PJs#P#oIq#o#p! 
T#p#qIq#q#rKX#r~IqGk%'ZX&U!f&^7[&WW&S,X&ap&f#tOr!!urs?ms#O!!u#O#P!#m#P#o!!u#o#pNc#p#q!!u#q#rMm#r~!!uG{%(ZZf,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u%)aZeR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%*g_T,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsxz$}z{%+f{!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%+y]_R&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%-V]%g,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u%.cZxR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Mg%/i^%h,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`!a%0e!a#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}B^%0xZ&q&j&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%2O_!dQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%2}!P!Q$}!Q![%5_![#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%3`]&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%4X!P#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%4lZ!m,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%5rg!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%5_![!g$}!g!h%7Z!h!l$}!l!m%;k!m#O$}#O#P!$R#P#R$}#R#S%5_#S#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%7la&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx{$}{|%8q|}$}}!O%8q!O!Q$}!Q![%9{![#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%9S]&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%9{![#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%:`c!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%9{![!l$}!l!m%;k!m#O$}#O#P!$R#P#R$}#R#S%9{#S#^$}#^#_%;k#_#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%<OZ!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{%=U_%iR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!P$}!P!Q%>T!Q!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz%>h]%kQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%?tu!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%BX!P!Q$}!Q![%Cc![!d$}!d!e%Ee!e!g$}!g!h%7Z!h!l$}!l!m%;k!m!q$}!q!r%H_!r!z$}!z!{%KR!{#O$}#O#P!$R#P#R$}#R#S%Cc#S#U$}#U#V%Ee#V#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#c$}#c#d%H_#d#l$}#l#m%KR#m#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Bj]&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%5_![#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Cvi!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!O$}!O!P%BX!P!Q$}!Q![%Cc![!g$}!g!h%7Z!h!l$}!l!m%;k!m#O$}#O#P!$R#P#R$}#R#S%Cc#S#X$}#X#Y%7Z#Y#^$}#^#_%;k#_#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Ev`&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!R%Fx!R!S%Fx!S#O$}#O#P!$R#P#R$}#R#S%Fx#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%G]`!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!R%Fx!R!S%Fx!S#O$}#O#P!$R#P#R$}#R#S%Fx#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Hp_&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!Y%Io!Y#O$}#O#P!$R#P#R$}#R#S%Io#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%JS_!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q!Y%Io!Y#O$}#O#P!$R#P#R$}#R#S%Io#S#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%Kdc&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%Lo![!c$}!c!i%Lo!i#O$}#O#P!$R#P#R$}#R#S%Lo#S#T$}#T#Z%Lo#Z#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy%MSc!f,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!Q$}!Q![%Lo![!c$}!c!i%Lo!i#O$}#O#P!$R#P#R$}#R#S%Lo#S#T$}#T#Z%Lo#Z#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Mg%Nr]y1s&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`& 
k!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u&!OZ%sR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{&#UZ#^,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{&$[_kR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!^$}!^!_&%Z!_!`!*Q!`!a!*Q!a#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz&%n]%eQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{&&z]%r,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`!*Q!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{&(W^kR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`!*Q!`!a&)S!a#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz&)g]%fQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}G{&*u]]Q#tP&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Mg&,Tc&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&Rsw$}wxHsx!Q$}!Q![&+n![!c$}!c!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nMg&-ug&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&/^sw$}wx&2dx!Q$}!Q![&+n![!c$}!c!t&+n!t!u&5j!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#f&+n#f#g&5j#g#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nGZ&/k_&^7[&TS&R,X&Z`&d!bOY!-lYZ'PZ]!-l]^'P^r!-lrs&0jsw!-lwx!/|x#O!-l#O#P!Cp#P#o!-l#o#p!F[#p#q!-l#q#r!DU#r~!-lGZ&0wZ&^7[&TS&R,X&Z`&d!bOr'Prs&1jsw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PD]&1wX&^7[&TS&V,X&Z`&d!bOwGgwx,kx#OGg#O#PH_#P#oGg#o#pET#p#qGg#q#rD_#r~GgGk&2q_&^7[&WW&R,X&ap&f#tOY$:YYZIqZ]$:Y]^Iq^r$:Yrs$;jsw$:Ywx&3px#O$:Y#O#P%!^#P#o$:Y#o#p%$x#p#q$:Y#q#r%!r#r~$:YGk&3}Z&^7[&WW&R,X&ap&f#tOrIqrs)}swIqwx&4px#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~IqFT&4}X&^7[&WW&S,X&ap&f#tOr!!urs?ms#O!!u#O#P!#m#P#o!!u#o#pNc#p#q!!u#q#rMm#r~!!uMg&6Pc&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&/^sw$}wx&2dx!Q$}!Q![&+n![!c$}!c!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nMg&7qg&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&9Ysw$}wx&<Qx!Q$}!Q![&+n![!c$}!c!t&+n!t!u&>x!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#f&+n#f#g&>x#g#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nGZ&9gZ&^7[&TS&Z`&d!b&`,XOr'Prs&:Ysw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PGZ&:eZ&^7[&TS&Z`&d!bOr'Prs&;Wsw'Pwx(Rx#O'P#O#PAe#P#o'P#o#pEu#p#q'P#q#rAy#r~'PD]&;eX&^7[&TS&e,X&Z`&d!bOwGgwx,kx#OGg#O#PH_#P#oGg#o#pET#p#qGg#q#rD_#r~GgGk&<_Z&^7[&WW&ap&f#t&Y,XOrIqrs)}swIqwx&=Qx#OIq#O#PJs#P#oIq#o#p! T#p#qIq#q#rKX#r~IqGk&=]Z&^7[&WW&ap&f#tOrIqrs)}swIqwx&>Ox#OIq#O#PJs#P#oIq#o#p! 
T#p#qIq#q#rKX#r~IqFT&>]X&^7[&WW&c,X&ap&f#tOr!!urs?ms#O!!u#O#P!#m#P#o!!u#o#pNc#p#q!!u#q#rMm#r~!!uMg&?_c&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&9Ysw$}wx&<Qx!Q$}!Q![&+n![!c$}!c!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nMg&APk&^7[&TS&WW&Q&j&Z`&ap&d!b&f#t%m,XOr$}rs&/^sw$}wx&2dx!Q$}!Q![&+n![!c$}!c!h&+n!h!i&>x!i!t&+n!t!u&5j!u!}&+n!}#O$}#O#P!$R#P#R$}#R#S&+n#S#T$}#T#U&+n#U#V&5j#V#Y&+n#Y#Z&>x#Z#o&+n#o#p!%i#p#q$}#q#r!$g#r$g$}$g~&+nG{&CXZ!V,X&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u&D_Z!UR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gz&Ee]%cQ&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}Gy&FgX&TS&WW!ZGmOr:{rs;isw:{wx<ox#O:{#O#P=u#P#o:{#o#p!$g#p~:{G{&Gg]%bR&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx!_$}!_!`$6h!`#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}<u&HqX!Y7_&TS&WW&Z`&ap&d!b&f#tOr!$grsBssw!$gwxLRx#O!$g#O#P!%c#P#o!$g#o#p!%i#p~!$gGy&IqZ%l,V&^7[&TS&WW&Z`&ap&d!b&f#tOr$}rs&Rsw$}wxHsx#O$}#O#P!$R#P#o$}#o#p!%i#p#q$}#q#r!$g#r~$}",tokenizers:[eO,$O,0,1,2,3,4,5,6,7,8,9,10,OO],topRules:{Script:[0,3]},specialized:[{term:213,get:O=>iO[O]||-1}],tokenPrec:7282});function I(O,$){let Q=O.lineIndent($.from),P=O.lineAt(O.pos,-1),e=P.from+P.text.length;return!/\S/.test(P.text)&&O.node.to<e+100&&!/\S/.test(O.state.sliceDoc(e,O.node.to))&&O.lineIndent(O.pos,-1)<=Q||/^\s*(else:|elif |except |finally:)/.test(O.textAfter)&&O.lineIndent(O.pos,-1)>Q?null:Q+O.unit}const aO=R.define({name:"python",parser:oO.configure({props:[Z.add({Body:O=>{var $;return($=I(O,O.node))!==null&&$!==void 0?$:O.continue()},IfStatement:O=>/^\s*(else:|elif )/.test(O.textAfter)?O.baseIndent:O.continue(),TryStatement:O=>/^\s*(except |finally:|else:)/.test(O.textAfter)?O.baseIndent:O.continue(),"TupleExpression ComprehensionExpression ParamList ArgList ParenthesizedExpression":a({closing:")"}),"DictionaryExpression DictionaryComprehensionExpression SetExpression SetComprehensionExpression":a({closing:"}"}),"ArrayExpression ArrayComprehensionExpression":a({closing:"]"}),"String FormatString":()=>null,Script:O=>{if(O.pos+/\s*/.exec(O.textAfter)[0].length>=O.node.to){let $=null;for(let Q=O.node,P=Q.to;Q=Q.lastChild,!(!Q||Q.to!=P);)Q.type.name=="Body"&&($=Q);if($){let Q=I(O,$);if(Q!=null)return Q}}return O.continue()}}),X.add({"ArrayExpression DictionaryExpression SetExpression TupleExpression":y,Body:(O,$)=>({from:O.from+1,to:O.to-(O.to==$.doc.length?0:1)})})]}),languageData:{closeBrackets:{brackets:["(","[","{","'",'"',"'''",'"""'],stringPrefixes:["f","fr","rf","r","u","b","br","rb","F","FR","RF","R","U","B","BR","RB"]},commentTokens:{line:"#"},indentOnInput:/^\s*([\}\]\)]|else:|elif |except |finally:)$/}});function dO(){return new f(aO)}export{dO as python,aO as pythonLanguage};
- //# sourceMappingURL=index-f8a15c0a.js.map
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/lfs.py DELETED
@@ -1,496 +0,0 @@
- # coding=utf-8
- # Copyright 2019-present, the HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Git LFS related type definitions and utilities"""
- import io
- import os
- import re
- import warnings
- from contextlib import AbstractContextManager
- from dataclasses import dataclass
- from math import ceil
- from os.path import getsize
- from pathlib import Path
- from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional, Tuple
-
- from requests.auth import HTTPBasicAuth
-
- from huggingface_hub.constants import ENDPOINT, HF_HUB_ENABLE_HF_TRANSFER, REPO_TYPES_URL_PREFIXES
- from huggingface_hub.utils import get_session
-
- from .utils import get_token_to_send, hf_raise_for_status, http_backoff, logging, validate_hf_hub_args
- from .utils._typing import TypedDict
- from .utils.sha import sha256, sha_fileobj
-
-
- if TYPE_CHECKING:
-     from ._commit_api import CommitOperationAdd
-
- logger = logging.get_logger(__name__)
-
- OID_REGEX = re.compile(r"^[0-9a-f]{40}$")
-
- LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"
-
- LFS_HEADERS = {
-     "Accept": "application/vnd.git-lfs+json",
-     "Content-Type": "application/vnd.git-lfs+json",
- }
-
-
- @dataclass
- class UploadInfo:
-     """
-     Dataclass holding required information to determine whether a blob
-     should be uploaded to the hub using the LFS protocol or the regular protocol
-
-     Args:
-         sha256 (`bytes`):
-             SHA256 hash of the blob
-         size (`int`):
-             Size in bytes of the blob
-         sample (`bytes`):
-             First 512 bytes of the blob
-     """
-
-     sha256: bytes
-     size: int
-     sample: bytes
-
-     @classmethod
-     def from_path(cls, path: str):
-         size = getsize(path)
-         with io.open(path, "rb") as file:
-             sample = file.peek(512)[:512]
-             sha = sha_fileobj(file)
-         return cls(size=size, sha256=sha, sample=sample)
-
-     @classmethod
-     def from_bytes(cls, data: bytes):
-         sha = sha256(data).digest()
-         return cls(size=len(data), sample=data[:512], sha256=sha)
-
-     @classmethod
-     def from_fileobj(cls, fileobj: BinaryIO):
-         sample = fileobj.read(512)
-         fileobj.seek(0, io.SEEK_SET)
-         sha = sha_fileobj(fileobj)
-         size = fileobj.tell()
-         fileobj.seek(0, io.SEEK_SET)
-         return cls(size=size, sha256=sha, sample=sample)
-
-
- @validate_hf_hub_args
- def post_lfs_batch_info(
-     upload_infos: Iterable[UploadInfo],
-     token: Optional[str],
-     repo_type: str,
-     repo_id: str,
-     endpoint: Optional[str] = None,
- ) -> Tuple[List[dict], List[dict]]:
-     """
-     Requests the LFS batch endpoint to retrieve upload instructions
-
-     Learn more: https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
-
-     Args:
-         upload_infos (`Iterable` of `UploadInfo`):
-             `UploadInfo` for the files that are being uploaded, typically obtained
-             from `CommitOperationAdd.upload_info`
-         repo_type (`str`):
-             Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
-         repo_id (`str`):
-             A namespace (user or an organization) and a repo name separated
-             by a `/`.
-         token (`str`, *optional*):
-             An authentication token (see https://huggingface.co/settings/tokens)
-
-     Returns:
-         `LfsBatchInfo`: 2-tuple:
-             - First element is the list of upload instructions from the server
-             - Second element is a list of errors, if any
-
-     Raises:
-         `ValueError`: If an argument is invalid or the server response is malformed
-
-         `HTTPError`: If the server returned an error
-     """
-     endpoint = endpoint if endpoint is not None else ENDPOINT
-     url_prefix = ""
-     if repo_type in REPO_TYPES_URL_PREFIXES:
-         url_prefix = REPO_TYPES_URL_PREFIXES[repo_type]
-     batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
-     resp = get_session().post(
-         batch_url,
-         headers=LFS_HEADERS,
-         json={
-             "operation": "upload",
-             "transfers": ["basic", "multipart"],
-             "objects": [
-                 {
-                     "oid": upload.sha256.hex(),
-                     "size": upload.size,
-                 }
-                 for upload in upload_infos
-             ],
-             "hash_algo": "sha256",
-         },
-         auth=HTTPBasicAuth(
-             "access_token",
-             get_token_to_send(token or True),  # type: ignore  # Token must be provided or retrieved
-         ),
-     )
-     hf_raise_for_status(resp)
-     batch_info = resp.json()
-
-     objects = batch_info.get("objects", None)
-     if not isinstance(objects, list):
-         raise ValueError("Malformed response from server")
-
-     return (
-         [_validate_batch_actions(obj) for obj in objects if "error" not in obj],
-         [_validate_batch_error(obj) for obj in objects if "error" in obj],
-     )
-
-
- class PayloadPartT(TypedDict):
-     partNumber: int
-     etag: str
-
-
- class CompletionPayloadT(TypedDict):
-     """Payload that will be sent to the Hub when uploading multi-part."""
-
-     oid: str
-     parts: List[PayloadPartT]
-
-
- def lfs_upload(operation: "CommitOperationAdd", lfs_batch_action: Dict, token: Optional[str]) -> None:
-     """
-     Handles uploading a given object to the Hub with the LFS protocol.
-
-     Can be a no-op if the content of the file is already present on the Hub large file storage.
-
-     Args:
-         operation (`CommitOperationAdd`):
-             The add operation triggering this upload.
-         lfs_batch_action (`dict`):
-             Upload instructions from the LFS batch endpoint for this object. See [`~utils.lfs.post_lfs_batch_info`] for
-             more details.
-         token (`str`, *optional*):
-             A [user access token](https://hf.co/settings/tokens) to authenticate requests against the Hub
-
-     Raises:
-         - `ValueError` if `lfs_batch_action` is improperly formatted
-         - `HTTPError` if the upload resulted in an error
-     """
-     # 0. If LFS file is already present, skip upload
-     _validate_batch_actions(lfs_batch_action)
-     actions = lfs_batch_action.get("actions")
-     if actions is None:
-         # The file was already uploaded
-         logger.debug(f"Content of file {operation.path_in_repo} is already present upstream - skipping upload")
-         return
-
-     # 1. Validate server response (check required keys in dict)
-     upload_action = lfs_batch_action["actions"]["upload"]
-     _validate_lfs_action(upload_action)
-     verify_action = lfs_batch_action["actions"].get("verify")
-     if verify_action is not None:
-         _validate_lfs_action(verify_action)
-
-     # 2. Upload file (either single part or multi-part)
-     header = upload_action.get("header", {})
-     chunk_size = header.get("chunk_size")
-     if chunk_size is not None:
-         try:
-             chunk_size = int(chunk_size)
-         except (ValueError, TypeError):
-             raise ValueError(
-                 f"Malformed response from LFS batch endpoint: `chunk_size` should be an integer. Got '{chunk_size}'."
-             )
-         _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_action["href"])
-     else:
-         _upload_single_part(operation=operation, upload_url=upload_action["href"])
-
-     # 3. Verify upload went well
-     if verify_action is not None:
-         _validate_lfs_action(verify_action)
-         verify_resp = get_session().post(
-             verify_action["href"],
-             auth=HTTPBasicAuth(username="USER", password=get_token_to_send(token or True)),  # type: ignore
-             json={"oid": operation.upload_info.sha256.hex(), "size": operation.upload_info.size},
-         )
-         hf_raise_for_status(verify_resp)
-     logger.debug(f"{operation.path_in_repo}: Upload successful")
-
-
- def _validate_lfs_action(lfs_action: dict):
-     """validates response from the LFS batch endpoint"""
-     if not (
-         isinstance(lfs_action.get("href"), str)
-         and (lfs_action.get("header") is None or isinstance(lfs_action.get("header"), dict))
-     ):
-         raise ValueError("lfs_action is improperly formatted")
-     return lfs_action
-
-
- def _validate_batch_actions(lfs_batch_actions: dict):
-     """validates response from the LFS batch endpoint"""
-     if not (isinstance(lfs_batch_actions.get("oid"), str) and isinstance(lfs_batch_actions.get("size"), int)):
-         raise ValueError("lfs_batch_actions is improperly formatted")
-
-     upload_action = lfs_batch_actions.get("actions", {}).get("upload")
-     verify_action = lfs_batch_actions.get("actions", {}).get("verify")
-     if upload_action is not None:
-         _validate_lfs_action(upload_action)
-     if verify_action is not None:
-         _validate_lfs_action(verify_action)
-     return lfs_batch_actions
-
-
- def _validate_batch_error(lfs_batch_error: dict):
-     """validates response from the LFS batch endpoint"""
-     if not (isinstance(lfs_batch_error.get("oid"), str) and isinstance(lfs_batch_error.get("size"), int)):
-         raise ValueError("lfs_batch_error is improperly formatted")
-     error_info = lfs_batch_error.get("error")
-     if not (
-         isinstance(error_info, dict)
-         and isinstance(error_info.get("message"), str)
-         and isinstance(error_info.get("code"), int)
-     ):
-         raise ValueError("lfs_batch_error is improperly formatted")
-     return lfs_batch_error
-
-
- def _upload_single_part(operation: "CommitOperationAdd", upload_url: str) -> None:
-     """
-     Uploads `fileobj` as a single PUT HTTP request (basic LFS transfer protocol)
-
-     Args:
-         upload_url (`str`):
-             The URL to PUT the file to.
-         fileobj:
-             The file-like object holding the data to upload.
-
-     Returns: `requests.Response`
-
-     Raises: `requests.HTTPError` if the upload resulted in an error
-     """
-     with operation.as_file(with_tqdm=True) as fileobj:
-         response = http_backoff("PUT", upload_url, data=fileobj)
-         hf_raise_for_status(response)
-
-
- def _upload_multi_part(operation: "CommitOperationAdd", header: Dict, chunk_size: int, upload_url: str) -> None:
-     """
-     Uploads file using HF multipart LFS transfer protocol.
-     """
-     # 1. Get upload URLs for each part
-     sorted_parts_urls = _get_sorted_parts_urls(header=header, upload_info=operation.upload_info, chunk_size=chunk_size)
-
-     # 2. Upload parts (either with hf_transfer or in pure Python)
-     use_hf_transfer = HF_HUB_ENABLE_HF_TRANSFER
-     if (
-         HF_HUB_ENABLE_HF_TRANSFER
-         and not isinstance(operation.path_or_fileobj, str)
-         and not isinstance(operation.path_or_fileobj, Path)
-     ):
-         warnings.warn(
-             "hf_transfer is enabled but does not support uploading from bytes or BinaryIO, falling back to regular"
-             " upload"
-         )
-         use_hf_transfer = False
-
-     response_headers = (
-         _upload_parts_hf_transfer(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size)
-         if use_hf_transfer
-         else _upload_parts_iteratively(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size)
-     )
-
-     # 3. Send completion request
-     completion_res = get_session().post(
-         upload_url,
-         json=_get_completion_payload(response_headers, operation.upload_info.sha256.hex()),
-         headers=LFS_HEADERS,
-     )
-     hf_raise_for_status(completion_res)
-
-
- def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]:
-     sorted_part_upload_urls = [
-         upload_url
-         for _, upload_url in sorted(
-             [
-                 (int(part_num, 10), upload_url)
-                 for part_num, upload_url in header.items()
-                 if part_num.isdigit() and len(part_num) > 0
-             ],
-             key=lambda t: t[0],
-         )
-     ]
-     num_parts = len(sorted_part_upload_urls)
-     if num_parts != ceil(upload_info.size / chunk_size):
-         raise ValueError("Invalid server response to upload large LFS file")
-     return sorted_part_upload_urls
-
-
- def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT:
-     parts: List[PayloadPartT] = []
-     for part_number, header in enumerate(response_headers):
-         etag = header.get("etag")
-         if etag is None or etag == "":
-             raise ValueError(f"Invalid etag (`{etag}`) returned for part {part_number + 1}")
-         parts.append(
-             {
-                 "partNumber": part_number + 1,
-                 "etag": etag,
-             }
-         )
-     return {"oid": oid, "parts": parts}
-
-
- def _upload_parts_iteratively(
-     operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
- ) -> List[Dict]:
-     headers = []
-     with operation.as_file(with_tqdm=True) as fileobj:
-         for part_idx, part_upload_url in enumerate(sorted_parts_urls):
-             with SliceFileObj(
-                 fileobj,
-                 seek_from=chunk_size * part_idx,
-                 read_limit=chunk_size,
-             ) as fileobj_slice:
-                 part_upload_res = http_backoff("PUT", part_upload_url, data=fileobj_slice)
-                 hf_raise_for_status(part_upload_res)
-                 headers.append(part_upload_res.headers)
-     return headers  # type: ignore
-
-
- def _upload_parts_hf_transfer(
-     operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
- ) -> List[Dict]:
-     # Upload file using an external Rust-based package. Upload is faster but supports fewer features (no progress bars).
-     try:
-         from hf_transfer import multipart_upload
-     except ImportError:
-         raise ValueError(
-             "Fast uploading using 'hf_transfer' is enabled (HF_HUB_ENABLE_HF_TRANSFER=1) but 'hf_transfer' package is"
-             " not available in your environment. Try `pip install hf_transfer`."
-         )
-
-     try:
-         return multipart_upload(
-             file_path=operation.path_or_fileobj,
-             parts_urls=sorted_parts_urls,
-             chunk_size=chunk_size,
-             max_files=128,
-             parallel_failures=127,  # could be removed
-             max_retries=5,
-         )
-     except Exception as e:
-         raise RuntimeError(
-             "An error occurred while uploading using `hf_transfer`. Consider disabling HF_HUB_ENABLE_HF_TRANSFER for"
-             " better error handling."
-         ) from e
-
-
- class SliceFileObj(AbstractContextManager):
-     """
-     Utility context manager to read a *slice* of a seekable file-like object as a seekable, file-like object.
-
-     This is NOT thread safe
-
-     Inspired by stackoverflow.com/a/29838711/593036
-
-     Credits to @julien-c
-
-     Args:
-         fileobj (`BinaryIO`):
-             A file-like object to slice. MUST implement `tell()` and `seek()` (and `read()` of course).
-             `fileobj` will be reset to its original position when exiting the context manager.
-         seek_from (`int`):
-             The start of the slice (offset from position 0 in bytes).
-         read_limit (`int`):
-             The maximum number of bytes to read from the slice.
-
-     Attributes:
-         previous_position (`int`):
-             The previous position
-
-     Examples:
-
-     Reading 200 bytes with an offset of 128 bytes from a file (i.e. bytes 128 to 327):
-     ```python
-     >>> with open("path/to/file", "rb") as file:
-     ...     with SliceFileObj(file, seek_from=128, read_limit=200) as fslice:
-     ...         fslice.read(...)
-     ```
-
-     Reading a file in chunks of 512 bytes
-     ```python
-     >>> import os
-     >>> chunk_size = 512
-     >>> file_size = os.path.getsize("path/to/file")
-     >>> with open("path/to/file", "rb") as file:
-     ...     for chunk_idx in range(ceil(file_size / chunk_size)):
-     ...         with SliceFileObj(file, seek_from=chunk_idx * chunk_size, read_limit=chunk_size) as fslice:
-     ...             chunk = fslice.read(...)
-
-     ```
-     """
-
-     def __init__(self, fileobj: BinaryIO, seek_from: int, read_limit: int):
-         self.fileobj = fileobj
-         self.seek_from = seek_from
-         self.read_limit = read_limit
-
-     def __enter__(self):
-         self._previous_position = self.fileobj.tell()
-         end_of_stream = self.fileobj.seek(0, os.SEEK_END)
-         self._len = min(self.read_limit, end_of_stream - self.seek_from)
-         # ^^ The actual number of bytes that can be read from the slice
-         self.fileobj.seek(self.seek_from, io.SEEK_SET)
-         return self
-
-     def __exit__(self, exc_type, exc_value, traceback):
-         self.fileobj.seek(self._previous_position, io.SEEK_SET)
-
-     def read(self, n: int = -1):
-         pos = self.tell()
-         if pos >= self._len:
-             return b""
-         remaining_amount = self._len - pos
-         data = self.fileobj.read(remaining_amount if n < 0 else min(n, remaining_amount))
-         return data
-
-     def tell(self) -> int:
-         return self.fileobj.tell() - self.seek_from
-
-     def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
-         start = self.seek_from
-         end = start + self._len
-         if whence in (os.SEEK_SET, os.SEEK_END):
-             offset = start + offset if whence == os.SEEK_SET else end + offset
-             offset = max(start, min(offset, end))
-             whence = os.SEEK_SET
-         elif whence == os.SEEK_CUR:
-             cur_pos = self.fileobj.tell()
-             offset = max(start - cur_pos, min(offset, end - cur_pos))
-         else:
-             raise ValueError(f"whence value {whence} is not supported")
-         return self.fileobj.seek(offset, whence) - self.seek_from
-
-     def __iter__(self):
-         yield self.read(n=4 * 1024 * 1024)
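
For reference, here is a minimal sketch of the chunking behaviour that `_upload_parts_iteratively` relies on: `SliceFileObj` (defined in the deleted file above) carves a seekable stream into fixed-size parts. It assumes the module is still importable as `huggingface_hub.lfs`; the 1 MiB chunk size and the in-memory payload are made-up stand-ins, since the real part size comes from the LFS batch endpoint and the real data from a local file.

```python
# A minimal sketch, not the library's upload path itself: it only demonstrates
# how SliceFileObj carves a seekable stream into parts.
import io
from math import ceil

from huggingface_hub.lfs import SliceFileObj  # assumption: module path as in the listing above

CHUNK_SIZE = 1024 * 1024  # hypothetical part size; the real one comes from the batch endpoint
payload = io.BytesIO(b"x" * (2 * CHUNK_SIZE + 123))  # stand-in for a large local file

size = payload.seek(0, io.SEEK_END)  # BytesIO.seek returns the new absolute position
payload.seek(0)

for part_idx in range(ceil(size / CHUNK_SIZE)):
    with SliceFileObj(payload, seek_from=part_idx * CHUNK_SIZE, read_limit=CHUNK_SIZE) as part:
        chunk = part.read()  # in _upload_parts_iteratively this slice is PUT to a pre-signed URL
        print(f"part {part_idx + 1}: {len(chunk)} bytes")
```

Run as-is, this prints two 1048576-byte parts and a final 123-byte part, mirroring how the multipart path sizes its last chunk.
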
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_runtime.py DELETED
@@ -1,328 +0,0 @@
- # coding=utf-8
- # Copyright 2022-present, the HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Check presence of installed packages at runtime."""
- import platform
- import sys
- from typing import Any, Dict
-
- import packaging.version
-
- from .. import __version__, constants
-
-
- _PY_VERSION: str = sys.version.split()[0].rstrip("+")
-
- if packaging.version.Version(_PY_VERSION) < packaging.version.Version("3.8.0"):
-     import importlib_metadata  # type: ignore
- else:
-     import importlib.metadata as importlib_metadata  # type: ignore
-
-
- _package_versions = {}
-
- _CANDIDATES = {
-     "aiohttp": {"aiohttp"},
-     "fastai": {"fastai"},
-     "fastcore": {"fastcore"},
-     "gradio": {"gradio"},
-     "graphviz": {"graphviz"},
-     "hf_transfer": {"hf_transfer"},
-     "jinja": {"Jinja2"},
-     "numpy": {"numpy"},
-     "pillow": {"Pillow"},
-     "pydantic": {"pydantic"},
-     "pydot": {"pydot"},
-     "tensorboard": {"tensorboardX"},
-     "tensorflow": (
-         "tensorflow",
-         "tensorflow-cpu",
-         "tensorflow-gpu",
-         "tf-nightly",
-         "tf-nightly-cpu",
-         "tf-nightly-gpu",
-         "intel-tensorflow",
-         "intel-tensorflow-avx512",
-         "tensorflow-rocm",
-         "tensorflow-macos",
-     ),
-     "torch": {"torch"},
- }
-
- # Check once at runtime
- for candidate_name, package_names in _CANDIDATES.items():
-     _package_versions[candidate_name] = "N/A"
-     for name in package_names:
-         try:
-             _package_versions[candidate_name] = importlib_metadata.version(name)
-             break
-         except importlib_metadata.PackageNotFoundError:
-             pass
-
-
- def _get_version(package_name: str) -> str:
-     return _package_versions.get(package_name, "N/A")
-
-
- def _is_available(package_name: str) -> bool:
-     return _get_version(package_name) != "N/A"
-
-
- # Python
- def get_python_version() -> str:
-     return _PY_VERSION
-
-
- # Huggingface Hub
- def get_hf_hub_version() -> str:
-     return __version__
-
-
- # aiohttp
- def is_aiohttp_available() -> bool:
-     return _is_available("aiohttp")
-
-
- def get_aiohttp_version() -> str:
-     return _get_version("aiohttp")
-
-
- # FastAI
- def is_fastai_available() -> bool:
-     return _is_available("fastai")
-
-
- def get_fastai_version() -> str:
-     return _get_version("fastai")
-
-
- # Fastcore
- def is_fastcore_available() -> bool:
-     return _is_available("fastcore")
-
-
- def get_fastcore_version() -> str:
-     return _get_version("fastcore")
-
-
- # Gradio
- def is_gradio_available() -> bool:
-     return _is_available("gradio")
-
-
- def get_gradio_version() -> str:
-     return _get_version("gradio")
-
-
- # Graphviz
- def is_graphviz_available() -> bool:
-     return _is_available("graphviz")
-
-
- def get_graphviz_version() -> str:
-     return _get_version("graphviz")
-
-
- # hf_transfer
- def is_hf_transfer_available() -> bool:
-     return _is_available("hf_transfer")
-
-
- def get_hf_transfer_version() -> str:
-     return _get_version("hf_transfer")
-
-
- # Numpy
- def is_numpy_available() -> bool:
-     return _is_available("numpy")
-
-
- def get_numpy_version() -> str:
-     return _get_version("numpy")
-
-
- # Jinja
- def is_jinja_available() -> bool:
-     return _is_available("jinja")
-
-
- def get_jinja_version() -> str:
-     return _get_version("jinja")
-
-
- # Pillow
- def is_pillow_available() -> bool:
-     return _is_available("pillow")
-
-
- def get_pillow_version() -> str:
-     return _get_version("pillow")
-
-
- # Pydantic
- def is_pydantic_available() -> bool:
-     return _is_available("pydantic")
-
-
- def get_pydantic_version() -> str:
-     return _get_version("pydantic")
-
-
- # Pydot
- def is_pydot_available() -> bool:
-     return _is_available("pydot")
-
-
- def get_pydot_version() -> str:
-     return _get_version("pydot")
-
-
- # Tensorboard
- def is_tensorboard_available() -> bool:
-     return _is_available("tensorboard")
-
-
- def get_tensorboard_version() -> str:
-     return _get_version("tensorboard")
-
-
- # Tensorflow
- def is_tf_available() -> bool:
-     return _is_available("tensorflow")
-
-
- def get_tf_version() -> str:
-     return _get_version("tensorflow")
-
-
- # Torch
- def is_torch_available() -> bool:
-     return _is_available("torch")
-
-
- def get_torch_version() -> str:
-     return _get_version("torch")
-
-
- # Shell-related helpers
- try:
-     # Set to `True` if script is running in a Google Colab notebook.
-     # If running in Google Colab, git credential store is set globally which makes the
-     # warning disappear. See https://github.com/huggingface/huggingface_hub/issues/1043
-     #
-     # Taken from https://stackoverflow.com/a/63519730.
-     _is_google_colab = "google.colab" in str(get_ipython())  # type: ignore # noqa: F821
- except NameError:
-     _is_google_colab = False
-
-
- def is_notebook() -> bool:
-     """Return `True` if code is executed in a notebook (Jupyter, Colab, QTconsole).
-
-     Taken from https://stackoverflow.com/a/39662359.
-     Adapted to make it work with Google Colab as well.
-     """
-     try:
-         shell_class = get_ipython().__class__  # type: ignore # noqa: F821
-         for parent_class in shell_class.__mro__:  # e.g. "is subclass of"
-             if parent_class.__name__ == "ZMQInteractiveShell":
-                 return True  # Jupyter notebook, Google Colab or qtconsole
-         return False
-     except NameError:
-         return False  # Probably standard Python interpreter
-
-
- def is_google_colab() -> bool:
-     """Return `True` if code is executed in a Google Colab.
-
-     Taken from https://stackoverflow.com/a/63519730.
-     """
-     return _is_google_colab
-
-
- def dump_environment_info() -> Dict[str, Any]:
-     """Dump information about the machine to help debugging issues.
-
-     Similar helpers exist in:
-     - `datasets` (https://github.com/huggingface/datasets/blob/main/src/datasets/commands/env.py)
-     - `diffusers` (https://github.com/huggingface/diffusers/blob/main/src/diffusers/commands/env.py)
-     - `transformers` (https://github.com/huggingface/transformers/blob/main/src/transformers/commands/env.py)
261
- """
262
- from huggingface_hub import HfFolder, whoami
263
- from huggingface_hub.utils import list_credential_helpers
264
-
265
- token = HfFolder().get_token()
266
-
267
- # Generic machine info
268
- info: Dict[str, Any] = {
269
- "huggingface_hub version": get_hf_hub_version(),
270
- "Platform": platform.platform(),
271
- "Python version": get_python_version(),
272
- }
273
-
274
- # Interpreter info
275
- try:
276
- shell_class = get_ipython().__class__ # type: ignore # noqa: F821
277
- info["Running in iPython ?"] = "Yes"
278
- info["iPython shell"] = shell_class.__name__
279
- except NameError:
280
- info["Running in iPython ?"] = "No"
281
- info["Running in notebook ?"] = "Yes" if is_notebook() else "No"
282
- info["Running in Google Colab ?"] = "Yes" if is_google_colab() else "No"
283
-
284
- # Login info
285
- info["Token path ?"] = HfFolder().path_token
286
- info["Has saved token ?"] = token is not None
287
- if token is not None:
288
- try:
289
- info["Who am I ?"] = whoami()["name"]
290
- except Exception:
291
- pass
292
-
293
- try:
294
- info["Configured git credential helpers"] = ", ".join(list_credential_helpers())
295
- except Exception:
296
- pass
297
-
298
- # Installed dependencies
299
- info["FastAI"] = get_fastai_version()
300
- info["Tensorflow"] = get_tf_version()
301
- info["Torch"] = get_torch_version()
302
- info["Jinja2"] = get_jinja_version()
303
- info["Graphviz"] = get_graphviz_version()
304
- info["Pydot"] = get_pydot_version()
305
- info["Pillow"] = get_pillow_version()
306
- info["hf_transfer"] = get_hf_transfer_version()
307
- info["gradio"] = get_gradio_version()
308
- info["tensorboard"] = get_tensorboard_version()
309
- info["numpy"] = get_numpy_version()
310
- info["pydantic"] = get_pydantic_version()
311
- info["aiohttp"] = get_aiohttp_version()
312
-
313
- # Environment variables
314
- info["ENDPOINT"] = constants.ENDPOINT
315
- info["HUGGINGFACE_HUB_CACHE"] = constants.HUGGINGFACE_HUB_CACHE
316
- info["HUGGINGFACE_ASSETS_CACHE"] = constants.HUGGINGFACE_ASSETS_CACHE
317
- info["HF_TOKEN_PATH"] = constants.HF_TOKEN_PATH
318
- info["HF_HUB_OFFLINE"] = constants.HF_HUB_OFFLINE
319
- info["HF_HUB_DISABLE_TELEMETRY"] = constants.HF_HUB_DISABLE_TELEMETRY
320
- info["HF_HUB_DISABLE_PROGRESS_BARS"] = constants.HF_HUB_DISABLE_PROGRESS_BARS
321
- info["HF_HUB_DISABLE_SYMLINKS_WARNING"] = constants.HF_HUB_DISABLE_SYMLINKS_WARNING
322
- info["HF_HUB_DISABLE_EXPERIMENTAL_WARNING"] = constants.HF_HUB_DISABLE_EXPERIMENTAL_WARNING
323
- info["HF_HUB_DISABLE_IMPLICIT_TOKEN"] = constants.HF_HUB_DISABLE_IMPLICIT_TOKEN
324
- info["HF_HUB_ENABLE_HF_TRANSFER"] = constants.HF_HUB_ENABLE_HF_TRANSFER
325
-
326
- print("\nCopy-and-paste the text below in your GitHub issue.\n")
327
- print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]) + "\n")
328
- return info
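The module above probes installed distributions once at import time via `importlib.metadata` and caches the results in `_package_versions`, so every `is_*_available()` / `get_*_version()` call is a cheap dict lookup. A minimal usage sketch (hypothetical caller; it assumes these helpers are re-exported from `huggingface_hub.utils`, as they were in this era of the library):

    # Hypothetical downstream usage of the helpers defined above.
    from huggingface_hub.utils import dump_environment_info, is_torch_available

    if is_torch_available():
        import torch  # package metadata was found, so the import is expected to succeed

    dump_environment_info()  # prints the copy-pastable report built at the end of the module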
spaces/Daextream/Whisper-Auto-Subtitled-Video-Generator/pages/04_🔊_Upload_Audio_File.py DELETED
@@ -1,205 +0,0 @@
- import whisper
- import streamlit as st
- from streamlit_lottie import st_lottie
- from utils import write_vtt, write_srt
- import ffmpeg
- import requests
- from typing import Iterator
- from io import StringIO
- import numpy as np
- import pathlib
- import os
-
- st.set_page_config(page_title="Auto Transcriber", page_icon="🔊", layout="wide")
-
- # Define a function that we can use to load lottie files from a link.
- @st.cache(allow_output_mutation=True)
- def load_lottieurl(url: str):
-     r = requests.get(url)
-     if r.status_code != 200:
-         return None
-     return r.json()
-
-
- APP_DIR = pathlib.Path(__file__).parent.absolute()
-
- LOCAL_DIR = APP_DIR / "local_audio"
- LOCAL_DIR.mkdir(exist_ok=True)
- save_dir = LOCAL_DIR / "output"
- save_dir.mkdir(exist_ok=True)
-
-
- col1, col2 = st.columns([1, 3])
- with col1:
-     lottie = load_lottieurl("https://assets1.lottiefiles.com/packages/lf20_1xbk4d2v.json")
-     st_lottie(lottie)
-
- with col2:
-     st.write("""
-     ## Auto Transcriber
-     ##### Input an audio file and get a transcript.
-     ###### ➠ If you want to transcribe the audio in its original language, select the task as "Transcribe"
-     ###### ➠ If you want to translate the transcription to English, select the task as "Translate"
-     ###### I recommend starting with the base model and then experimenting with the larger models; the small and medium models often work well. """)
-
- loaded_model = whisper.load_model("base")
- current_size = "None"
-
-
- @st.cache(allow_output_mutation=True)
- def change_model(current_size, size):
-     if current_size != size:
-         loaded_model = whisper.load_model(size)
-         return loaded_model
-     else:
-         raise Exception("Model size is the same as the current size.")
-
- @st.cache(allow_output_mutation=True)
- def inference(loaded_model, uploaded_file, task):
-     with open(f"{save_dir}/input.mp3", "wb") as f:
-         f.write(uploaded_file.read())
-     audio = ffmpeg.input(f"{save_dir}/input.mp3")
-     audio = ffmpeg.output(audio, f"{save_dir}/output.wav", acodec="pcm_s16le", ac=1, ar="16k")
-     ffmpeg.run(audio, overwrite_output=True)
-     if task == "Transcribe":
-         options = dict(task="transcribe", best_of=5)
-         results = loaded_model.transcribe(f"{save_dir}/output.wav", **options)
-         vtt = getSubs(results["segments"], "vtt", 80)
-         srt = getSubs(results["segments"], "srt", 80)
-         lang = results["language"]
-         return results["text"], vtt, srt, lang
-     elif task == "Translate":
-         options = dict(task="translate", best_of=5)
-         results = loaded_model.transcribe(f"{save_dir}/output.wav", **options)
-         vtt = getSubs(results["segments"], "vtt", 80)
-         srt = getSubs(results["segments"], "srt", 80)
-         lang = results["language"]
-         return results["text"], vtt, srt, lang
-     else:
-         raise ValueError("Task not supported")
-
-
- def getSubs(segments: Iterator[dict], format: str, maxLineWidth: int) -> str:
-     segmentStream = StringIO()
-
-     if format == 'vtt':
-         write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-     elif format == 'srt':
-         write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth)
-     else:
-         raise Exception("Unknown format " + format)
-
-     segmentStream.seek(0)
-     return segmentStream.read()
-
-
- def main():
-     size = st.selectbox("Select Model Size (The larger the model, the more accurate the transcription will be, but it will take longer)", ["tiny", "base", "small", "medium", "large"], index=1)
-     loaded_model = change_model(current_size, size)
-     st.write(f"Model is {'multilingual' if loaded_model.is_multilingual else 'English-only'} "
-              f"and has {sum(np.prod(p.shape) for p in loaded_model.parameters()):,} parameters.")
-     input_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "m4a"])
-     if input_file is not None:
-         filename = input_file.name[:-4]
-     else:
-         filename = None
-     task = st.selectbox("Select Task", ["Transcribe", "Translate"], index=0)
-     if task == "Transcribe":
-         if st.button("Transcribe"):
-             results = inference(loaded_model, input_file, task)
-             col3, col4 = st.columns(2)
-             col5, col6, col7 = st.columns(3)
-             col9, col10 = st.columns(2)
-
-             with col3:
-                 st.audio(input_file)
-
-             with open("transcript.txt", "w+", encoding='utf8') as f:
-                 f.writelines(results[0])
-             with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                 datatxt = f.read()
-
-             with open("transcript.vtt", "w+", encoding='utf8') as f:
-                 f.writelines(results[1])
-             with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                 datavtt = f.read()
-
-             with open("transcript.srt", "w+", encoding='utf8') as f:
-                 f.writelines(results[2])
-             with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                 datasrt = f.read()
-
-             with col5:
-                 st.download_button(label="Download Transcript (.txt)",
-                                    data=datatxt,
-                                    file_name="transcript.txt")
-             with col6:
-                 st.download_button(label="Download Transcript (.vtt)",
-                                    data=datavtt,
-                                    file_name="transcript.vtt")
-             with col7:
-                 st.download_button(label="Download Transcript (.srt)",
-                                    data=datasrt,
-                                    file_name="transcript.srt")
-             with col9:
-                 st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-             with col10:
-                 st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the audio again.")
-
-     elif task == "Translate":
-         if st.button("Translate to English"):
-             results = inference(loaded_model, input_file, task)
-             col3, col4 = st.columns(2)
-             col5, col6, col7 = st.columns(3)
-             col9, col10 = st.columns(2)
-
-             with col3:
-                 st.audio(input_file)
-
-             with open("transcript.txt", "w+", encoding='utf8') as f:
-                 f.writelines(results[0])
-             with open(os.path.join(os.getcwd(), "transcript.txt"), "rb") as f:
-                 datatxt = f.read()
-
-             with open("transcript.vtt", "w+", encoding='utf8') as f:
-                 f.writelines(results[1])
-             with open(os.path.join(os.getcwd(), "transcript.vtt"), "rb") as f:
-                 datavtt = f.read()
-
-             with open("transcript.srt", "w+", encoding='utf8') as f:
-                 f.writelines(results[2])
-             with open(os.path.join(os.getcwd(), "transcript.srt"), "rb") as f:
-                 datasrt = f.read()
-
-             with col5:
-                 st.download_button(label="Download Transcript (.txt)",
-                                    data=datatxt,
-                                    file_name="transcript.txt")
-             with col6:
-                 st.download_button(label="Download Transcript (.vtt)",
-                                    data=datavtt,
-                                    file_name="transcript.vtt")
-             with col7:
-                 st.download_button(label="Download Transcript (.srt)",
-                                    data=datasrt,
-                                    file_name="transcript.srt")
-             with col9:
-                 st.success("You can download the transcript in .srt format, edit it (if you need to) and upload it to YouTube to create subtitles for your video.")
-             with col10:
-                 st.info("Streamlit refreshes after the download button is clicked. The data is cached so you can download the transcript again without having to transcribe the audio again.")
-
-     else:
-         st.error("Please select a task.")
-
-
- if __name__ == "__main__":
-     main()
-     st.markdown("###### Made with :heart: by [@BatuhanYılmaz](https://twitter.com/batuhan3326) [![this is an image link](https://i.imgur.com/thJhzOO.png)](https://www.buymeacoffee.com/batuhanylmz)")
spaces/Davidsamuel101/PPTGenerator/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: PPTGenerator
- emoji: 📊
- colorFrom: blue
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.27.0
- app_file: src/app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
- <!-- To run the app locally run `gradio src/app.py` -->
spaces/Eddycrack864/Applio-Inference/tensorlowest.py DELETED
@@ -1,123 +0,0 @@
- from tensorboard.backend.event_processing import event_accumulator
-
- import os
- from shutil import copy2
- from re import search as RSearch
- import pandas as pd
- from ast import literal_eval as LEval
-
- weights_dir = 'weights/'
-
- def find_biggest_tensorboard(tensordir):
-     try:
-         files = [f for f in os.listdir(tensordir) if f.endswith('.0')]
-         if not files:
-             print("No files with the '.0' extension found!")
-             return
-
-         max_size = 0
-         biggest_file = ""
-
-         for file in files:
-             file_path = os.path.join(tensordir, file)
-             if os.path.isfile(file_path):
-                 file_size = os.path.getsize(file_path)
-                 if file_size > max_size:
-                     max_size = file_size
-                     biggest_file = file
-
-         return biggest_file
-
-     except FileNotFoundError:
-         print("Couldn't find your model!")
-         return
-
- def main(model_name, save_freq, lastmdls):
-     global lowestval_weight_dir, scl
-
-     tensordir = os.path.join('logs', model_name)
-     lowestval_weight_dir = os.path.join(tensordir, "lowestvals")
-
-     latest_file = find_biggest_tensorboard(tensordir)
-
-     if latest_file is None:
-         print("Couldn't find a valid tensorboard file!")
-         return
-
-     tfile = os.path.join(tensordir, latest_file)
-
-     ea = event_accumulator.EventAccumulator(tfile,
-                                             size_guidance={
-                                                 event_accumulator.COMPRESSED_HISTOGRAMS: 500,
-                                                 event_accumulator.IMAGES: 4,
-                                                 event_accumulator.AUDIO: 4,
-                                                 event_accumulator.SCALARS: 0,
-                                                 event_accumulator.HISTOGRAMS: 1,
-                                             })
-
-     ea.Reload()
-     ea.Tags()
-
-     scl = ea.Scalars('loss/g/total')
-
-     listwstep = {}
-
-     for val in scl:
-         if (val.step // save_freq) * save_freq in [val.step for val in scl]:
-             listwstep[float(val.value)] = (val.step // save_freq) * save_freq
-
-     lowest_vals = sorted(listwstep.keys())[:lastmdls]
-
-     sorted_dict = {value: step for value, step in listwstep.items() if value in lowest_vals}
-
-     return sorted_dict
-
- def selectweights(model_name, file_dict, weights_dir, lowestval_weight_dir):
-     os.makedirs(lowestval_weight_dir, exist_ok=True)
-     logdir = []
-     files = []
-     lbldict = {
-         'Values': {},
-         'Names': {}
-     }
-     weights_dir_path = os.path.join(weights_dir, "")
-     low_val_path = os.path.join(os.getcwd(), os.path.join(lowestval_weight_dir, ""))
-
-     try:
-         if isinstance(file_dict, str):  # accept the dict serialized as a string (e.g. from a UI field)
-             file_dict = LEval(file_dict)
-     except Exception as e:
-         print(f"Error! {e}")
-         return f"Couldn't load tensorboard file! {e}"
-
-     weights = [f for f in os.scandir(weights_dir)]
-     for key, value in file_dict.items():
-         pattern = fr"^{model_name}_.*_s{value}\.pth$"
-         matching_weights = [f.name for f in weights if f.is_file() and RSearch(pattern, f.name)]
-         for weight in matching_weights:
-             source_path = weights_dir_path + weight
-             destination_path = os.path.join(lowestval_weight_dir, weight)
-
-             copy2(source_path, destination_path)
-
-             logdir.append(f"File = {weight} Value: {key}, Step: {value}")
-
-             lbldict['Names'][weight] = weight
-             lbldict['Values'][weight] = key
-
-             files.append(low_val_path + weight)
-
-             print(f"File = {weight} Value: {key}, Step: {value}")
-
-             yield ('\n'.join(logdir), files, pd.DataFrame(lbldict))
-
-     return ''.join(logdir), files, pd.DataFrame(lbldict)
-
-
- if __name__ == "__main__":
-     model = str(input("Enter the name of the model: "))
-     sav_freq = int(input("Enter save frequency of the model: "))
-     last_mdls = int(input("Enter how many lowest-loss checkpoints to keep: "))
-     ds = main(model, sav_freq, last_mdls)
-
-     if ds:
-         # selectweights is a generator; iterate it so the copies actually happen
-         for _log, _files, _table in selectweights(model, ds, weights_dir, lowestval_weight_dir):
-             pass
-
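For orientation, the script's core move is reading the `loss/g/total` scalar series straight out of a TensorBoard event file. A minimal sketch of just that step, using the same `event_accumulator` API as above (`logs/mymodel` is a hypothetical run directory):

    from tensorboard.backend.event_processing import event_accumulator

    ea = event_accumulator.EventAccumulator(
        "logs/mymodel",  # hypothetical path to a run directory or event file
        size_guidance={event_accumulator.SCALARS: 0},  # 0 = keep every scalar point
    )
    ea.Reload()  # actually parse the event file
    for event in ea.Scalars("loss/g/total"):
        print(event.step, event.value)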
spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/models/yolo.py DELETED
@@ -1,283 +0,0 @@
- import argparse
- import logging
- import sys
- from copy import deepcopy
- from pathlib import Path
-
- import math
-
- sys.path.append('./')  # to run '$ python *.py' files in subdirectories
- logger = logging.getLogger(__name__)
-
- import torch
- import torch.nn as nn
-
- from models.common import Conv, Bottleneck, SPP, DWConv, Focus, BottleneckCSP, Concat, NMS, autoShape
- from models.experimental import MixConv2d, CrossConv, C3
- from utils.general import check_anchor_order, make_divisible, check_file, set_logging
- from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
-     select_device, copy_attr
-
-
- class Detect(nn.Module):
-     stride = None  # strides computed during build
-     export = False  # onnx export
-
-     def __init__(self, nc=80, anchors=(), ch=()):  # detection layer
-         super(Detect, self).__init__()
-         self.nc = nc  # number of classes
-         self.no = nc + 5  # number of outputs per anchor
-         self.nl = len(anchors)  # number of detection layers
-         self.na = len(anchors[0]) // 2  # number of anchors
-         self.grid = [torch.zeros(1)] * self.nl  # init grid
-         a = torch.tensor(anchors).float().view(self.nl, -1, 2)
-         self.register_buffer('anchors', a)  # shape(nl,na,2)
-         self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))  # shape(nl,1,na,1,1,2)
-         self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
-
-     def forward(self, x):
-         # x = x.copy()  # for profiling
-         z = []  # inference output
-         self.training |= self.export
-         for i in range(self.nl):
-             x[i] = self.m[i](x[i])  # conv
-             bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
-             x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
-
-             if not self.training:  # inference
-                 if self.grid[i].shape[2:4] != x[i].shape[2:4]:
-                     self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
-
-                 y = x[i].sigmoid()
-                 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
-                 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh
-                 z.append(y.view(bs, -1, self.no))
-
-         return x if self.training else (torch.cat(z, 1), x)
-
-     @staticmethod
-     def _make_grid(nx=20, ny=20):
-         yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
-         return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
-
-
- class Model(nn.Module):
-     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None):  # model, input channels, number of classes
-         super(Model, self).__init__()
-         if isinstance(cfg, dict):
-             self.yaml = cfg  # model dict
-         else:  # is *.yaml
-             import yaml  # for torch hub
-             self.yaml_file = Path(cfg).name
-             with open(cfg) as f:
-                 self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict
-
-         # Define model
-         if nc and nc != self.yaml['nc']:
-             print('Overriding model.yaml nc=%g with nc=%g' % (self.yaml['nc'], nc))
-             self.yaml['nc'] = nc  # override yaml value
-         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist, ch_out
-         # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
-
-         # Build strides, anchors
-         m = self.model[-1]  # Detect()
-         if isinstance(m, Detect):
-             s = 128  # 2x min stride
-             m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
-             m.anchors /= m.stride.view(-1, 1, 1)
-             check_anchor_order(m)
-             self.stride = m.stride
-             self._initialize_biases()  # only run once
-             # print('Strides: %s' % m.stride.tolist())
-
-         # Init weights, biases
-         initialize_weights(self)
-         self.info()
-         print('')
-
-     def forward(self, x, augment=False, profile=False):
-         if augment:
-             img_size = x.shape[-2:]  # height, width
-             s = [1, 0.83, 0.67]  # scales
-             f = [None, 3, None]  # flips (2-ud, 3-lr)
-             y = []  # outputs
-             for si, fi in zip(s, f):
-                 xi = scale_img(x.flip(fi) if fi else x, si)
-                 yi = self.forward_once(xi)[0]  # forward
-                 # cv2.imwrite('img%g.jpg' % s, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
-                 yi[..., :4] /= si  # de-scale
-                 if fi == 2:
-                     yi[..., 1] = img_size[0] - yi[..., 1]  # de-flip ud
-                 elif fi == 3:
-                     yi[..., 0] = img_size[1] - yi[..., 0]  # de-flip lr
-                 y.append(yi)
-             return torch.cat(y, 1), None  # augmented inference, train
-         else:
-             return self.forward_once(x, profile)  # single-scale inference, train
-
-     def forward_once(self, x, profile=False):
-         y, dt = [], []  # outputs
-         for m in self.model:
-             if m.f != -1:  # if not from previous layer
-                 x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
-
-             if profile:
-                 try:
-                     import thop
-                     o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2  # FLOPS
-                 except Exception:
-                     o = 0
-                 t = time_synchronized()
-                 for _ in range(10):
-                     _ = m(x)
-                 dt.append((time_synchronized() - t) * 100)
-                 print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
-
-             x = m(x)  # run
-             y.append(x if m.i in self.save else None)  # save output
-
-         if profile:
-             print('%.1fms total' % sum(dt))
-         return x
-
-     def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
-         # https://arxiv.org/abs/1708.02002 section 3.3
-         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
-         m = self.model[-1]  # Detect() module
-         for mi, s in zip(m.m, m.stride):  # from
-             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
-             b[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
-             b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
-             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
-
-     def _print_biases(self):
-         m = self.model[-1]  # Detect() module
-         for mi in m.m:  # from
-             b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
-             print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
-
-     # def _print_weights(self):
-     #     for m in self.model.modules():
-     #         if type(m) is Bottleneck:
-     #             print('%10.3g' % (m.w.detach().sigmoid() * 2))  # shortcut weights
-
-     def fuse(self):  # fuse model Conv2d() + BatchNorm2d() layers
-         # print('Fusing layers... ')
-         for m in self.model.modules():
-             if type(m) is Conv and hasattr(m, 'bn'):
-                 m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv
-                 delattr(m, 'bn')  # remove batchnorm
-                 m.forward = m.fuseforward  # update forward
-         self.info()
-         return self
-
-     def nms(self, mode=True):  # add or remove NMS module
-         present = type(self.model[-1]) is NMS  # last layer is NMS
-         if mode and not present:
-             print('Adding NMS... ')
-             m = NMS()  # module
-             m.f = -1  # from
-             m.i = self.model[-1].i + 1  # index
-             self.model.add_module(name='%s' % m.i, module=m)  # add
-             self.eval()
-         elif not mode and present:
-             print('Removing NMS... ')
-             self.model = self.model[:-1]  # remove
-         return self
-
-     def autoshape(self):  # add autoShape module
-         print('Adding autoShape... ')
-         m = autoShape(self)  # wrap model
-         copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())  # copy attributes
-         return m
-
-     def info(self, verbose=False):  # print model information
-         model_info(self, verbose)
-
-
- def parse_model(d, ch):  # model_dict, input_channels(3)
-     logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
-     anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
-     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
-     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
-
-     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
-     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
-         m = eval(m) if isinstance(m, str) else m  # eval strings
-         for j, a in enumerate(args):
-             try:
-                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
-             except Exception:
-                 pass
-
-         n = max(round(n * gd), 1) if n > 1 else n  # depth gain
-         if m in [Conv, Bottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP, C3]:
-             c1, c2 = ch[f], args[0]
-
-             # Normal
-             # if i > 0 and args[0] != no:  # channel expansion factor
-             #     ex = 1.75  # exponential (default 2.0)
-             #     e = math.log(c2 / ch[1]) / math.log(2)
-             #     c2 = int(ch[1] * ex ** e)
-             # if m != Focus:
-
-             c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
-
-             # Experimental
-             # if i > 0 and args[0] != no:  # channel expansion factor
-             #     ex = 1 + gw  # exponential (default 2.0)
-             #     ch1 = 32  # ch[1]
-             #     e = math.log(c2 / ch1) / math.log(2)  # level 1-n
-             #     c2 = int(ch1 * ex ** e)
-             # if m != Focus:
-             #     c2 = make_divisible(c2, 8) if c2 != no else c2
-
-             args = [c1, c2, *args[1:]]
-             if m in [BottleneckCSP, C3]:
-                 args.insert(2, n)
-                 n = 1
-         elif m is nn.BatchNorm2d:
-             args = [ch[f]]
-         elif m is Concat:
-             c2 = sum([ch[-1 if x == -1 else x + 1] for x in f])
-         elif m is Detect:
-             args.append([ch[x + 1] for x in f])
-             if isinstance(args[1], int):  # number of anchors
-                 args[1] = [list(range(args[1] * 2))] * len(f)
-         else:
-             c2 = ch[f]
-
-         m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
-         t = str(m)[8:-2].replace('__main__.', '')  # module type
-         np = sum([x.numel() for x in m_.parameters()])  # number params
-         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
-         logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))  # print
-         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
-         layers.append(m_)
-         ch.append(c2)
-     return nn.Sequential(*layers), sorted(save)
-
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     opt = parser.parse_args()
-     opt.cfg = check_file(opt.cfg)  # check file
-     set_logging()
-     device = select_device(opt.device)
-
-     # Create model
-     model = Model(opt.cfg).to(device)
-     model.train()
-
-     # Profile
-     # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
-     # y = model(img, profile=True)
-
-     # Tensorboard
-     # from torch.utils.tensorboard import SummaryWriter
-     # tb_writer = SummaryWriter()
-     # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
-     # tb_writer.add_graph(model.model, img)  # add model to tensorboard
-     # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
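As the `__main__` block hints, the file is self-contained enough to build a detector from a model yaml and push a dummy batch through it. A minimal sketch under the same assumptions as the script (the repo's `models/` and `utils/` imports resolve, and a `yolov5s.yaml` config exists at the hypothetical path):

    import torch
    from models.yolo import Model  # the module above

    model = Model("models/yolov5s.yaml", ch=3, nc=80)  # nc overrides the yaml's class count
    model.eval()  # in eval mode Detect.forward also returns decoded predictions
    with torch.no_grad():
        pred, raw = model(torch.zeros(1, 3, 640, 640))
    print(pred.shape)  # (1, total_anchors, nc + 5), i.e. 85 outputs per anchor for nc=80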
spaces/FSDL-Fashion/fashion_img_search/app.py DELETED
@@ -1 +0,0 @@
- from fis.app import app  # noqa: F401