parquet-converter committed
Commit 557396c · 1 Parent(s): d045c56

Update parquet files (step 5 of 121)
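The parquet-converter bot rewrites dataset files into Parquet shards on the Hub. As a rough, hypothetical sketch of that idea with pyarrow (the data and shard name are illustrative; this is not the bot's actual code):

    # Minimal Parquet conversion sketch (hypothetical data and file name).
    import pyarrow as pa
    import pyarrow.parquet as pq

    table = pa.Table.from_pydict({"text": ["first example", "second example"], "label": [0, 1]})
    pq.write_table(table, "train-00000-of-00001.parquet")  # write one Parquet shard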

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson L5190 Resetter Crack Free Download The Ultimate Guide to Resetting Your Printer.md +0 -34
  2. spaces/1gistliPinn/ChatGPT4/Examples/Clannad After Story English Dub.md +0 -6
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Download Farm Heroes Saga Mod Apk Versi Terbaru 2023 dengan Fitur Unlimited Lives dan Boosters.md +0 -99
  4. spaces/1phancelerku/anime-remove-background/B-Project Kaikan Everyday - The Otome Game Youve Been Waiting For.md +0 -93
  5. spaces/1toTree/lora_test/ppdiffusers/utils/testing_utils.py +0 -409
  6. spaces/232labs/VToonify/vtoonify/model/raft/core/raft.py +0 -144
  7. spaces/716this/review-star-prediction-app/README.md +0 -14
  8. spaces/AIConsultant/MusicGen/audiocraft/solvers/compression.py +0 -328
  9. spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/zh_g2pM.py +0 -72
  10. spaces/AIZero2HeroBootcamp/Memory/app.py +0 -102
  11. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py +0 -17
  12. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/__init__.py +0 -0
  13. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/seq2seq.py +0 -277
  14. spaces/Abhilashvj/planogram-compliance/utils/loggers/wandb/log_dataset.py +0 -43
  15. spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/__init__.py +0 -1
  16. spaces/Adithedev/Keyword-Extractor/README.md +0 -12
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateBBCodeText.js +0 -16
  18. spaces/AlexWang/lama/saicinpainting/evaluation/masks/README.md +0 -27
  19. spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/__init__.py +0 -0
  20. spaces/Amrrs/DragGan-Inversion/torch_utils/ops/grid_sample_gradfix.py +0 -84
  21. spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/kd_loss.py +0 -87
  22. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py +0 -2
  23. spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py +0 -2
  24. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/ops/__init__.py +0 -4
  25. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/inject_securetransport.py +0 -35
  26. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/__init__.py +0 -23
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/simple.py +0 -116
  28. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/__init__.py +0 -24
  29. spaces/Bajr/softly/Dockerfile +0 -15
  30. spaces/Benson/text-generation/Examples/Coin Master Unlimited Free Spins 2022 Apk.md +0 -86
  31. spaces/Benson/text-generation/Examples/Descarga De Archivos Zip Facebook Lite.md +0 -102
  32. spaces/Boadiwaa/Recipes/openai/api_resources/abstract/createable_api_resource.py +0 -47
  33. spaces/BoomerangGirl/MagicPrompt-Stable-Diffusion/README.md +0 -14
  34. spaces/Branon/oai-proxy/README.md +0 -10
  35. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/deployment.md +0 -94
  36. spaces/ChallengeHub/Chinese-LangChain/clc/langchain_application.py +0 -97
  37. spaces/ChrisCaviar/ControlNet-v1-1/style.css +0 -3
  38. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/cv2_util.py +0 -24
  39. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_urldispatcher.py +0 -1220
  40. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/params.py +0 -760
  41. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I_V_.py +0 -20
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/ftp.py +0 -380
  43. spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/resnet_vd.py +0 -368
  44. spaces/Detomo/ai-comic-generation/src/components/ui/switch.tsx +0 -29
  45. spaces/Dinoking/Guccio-AI-Designer/netdissect/statedict.py +0 -100
  46. spaces/Djacon/emotion_detection/files/js/main.js +0 -0
  47. spaces/DragGan/DragGan-Inversion/stylegan_human/training/training_loop.py +0 -499
  48. spaces/ECCV2022/bytetrack/tutorials/transtrack/main_track.py +0 -375
  49. spaces/EDGAhab/Paimon-Talking/utils.py +0 -258
  50. spaces/EuroPython2022/clickbaitonator/fudge/data.py +0 -415
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Epson L5190 Resetter Crack Free Download The Ultimate Guide to Resetting Your Printer.md DELETED
@@ -1,34 +0,0 @@
-
- <h1>Epson L5190 Resetter Crack Free Download: How to Reset Your Printer Easily</h1>
- <p>If you own an Epson L5190 printer, you might have encountered a problem where the printer stops working and displays an error message saying that the ink pads are at the end of their service life. This means that the printer has reached its maximum number of prints and needs to be reset. However, resetting the printer requires a software tool called Epson L5190 resetter, which is not free and costs around $10 to purchase. But what if you could get Epson L5190 resetter crack free download and reset your printer without paying anything? In this article, we will show you how to download and use Epson L5190 resetter crack free download safely and easily.</p>
- <h2>epson l5190 resetter crack free download</h2><br /><p><b><b>Download Zip</b> &#10003;&#10003;&#10003; <a href="https://byltly.com/2uKxci">https://byltly.com/2uKxci</a></b></p><br /><br />
- <h2>What is Epson L5190 Resetter Crack Free Download?</h2>
- <p>Epson L5190 resetter crack free download is a modified version of the original Epson L5190 resetter that bypasses the license verification and lets you use the tool for free. However, this also means that Epson L5190 resetter crack free download is not authorized by the developers and may contain malware, viruses, or other harmful files. Therefore, you should be careful when downloading and using Epson L5190 resetter crack free download and only use it from trusted sources.</p>
- <h2>How to Download Epson L5190 Resetter Crack Free Download?</h2>
- <p>There are many websites and videos that claim to offer Epson L5190 resetter crack free download links, but most of them are fake, outdated, or infected. To avoid getting scammed or infected, you should only download Epson L5190 resetter crack free download from reputable sources that have positive feedback and reviews from other users. One such source is <a href="https://epsonresetter.com/">Epsonresetter.com</a>, which provides a working and updated version of Epson L5190 resetter crack free download with a simple installation process.</p>
- <p>To download Epson L5190 resetter crack free download from Epsonresetter.com, follow these steps:</p>
- <p></p>
- <ol>
- <li>Go to <a href="https://epsonresetter.com/">Epsonresetter.com</a> and click on the "Download" button.</li>
- <li>You will be redirected to a verification page where you need to complete a short survey or offer to prove that you are human. This is to prevent bots and leechers from abusing the download link.</li>
- <li>After completing the verification, you will get access to the download link. Click on it and save the file to your computer.</li>
- <li>Extract the file using WinRAR or 7-Zip and run the installer.</li>
- <li>Follow the instructions on the screen and wait for the installation to finish.</li>
- <li>You have successfully downloaded and installed Epson L5190 resetter crack free download on your computer.</li>
- </ol>
- <h2>How to Use Epson L5190 Resetter Crack Free Download?</h2>
- <p>To use Epson L5190 resetter crack free download, follow these steps:</p>
- <ol>
- <li>Run Epson L5190 resetter as administrator from your desktop or start menu.</li>
- <li>You will see the main interface of Epson L5190 resetter where you can select your printer model and port.</li>
- <li>Click on the "Particular adjustment mode" button and choose "Waste ink pad counter" from the list.</li>
- <li>Click on "OK" and then check the boxes for "Main pad counter" and "Platen pad counter".</li>
- <li>Click on "Check" to see the current status of your ink pads.</li>
- <li>Click on "Initialization" to reset your ink pads to zero.</li>
- <li>A message will pop up asking you to turn off your printer. Do so and then turn it back on.</li>
- <li>You have successfully reset your printer using Epson L5190 resetter crack free download.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Epson L5190 resetter is a useful tool that can help you extend the life of your printer by resetting its ink pads. However, if you don't want to pay for it, you can try using Epson L5190 resetter crack free download from trusted sources like Epsonresetter.com. However, you should be aware of the risks involved in using cracked software and always scan your files with antivirus before running them. We hope this article helped you learn how to download</p> ddb901b051<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Clannad After Story English Dub.md DELETED
@@ -1,6 +0,0 @@
- <h2>Clannad After Story english dub</h2><br /><p><b><b>Download</b> &#10145; <a href="https://imgfil.com/2uy06Q">https://imgfil.com/2uy06Q</a></b></p><br /><br />
-
- 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Download Farm Heroes Saga Mod Apk Versi Terbaru 2023 dengan Fitur Unlimited Lives dan Boosters.md DELETED
@@ -1,99 +0,0 @@
-
- <h1>Download the Latest Version of the Farm Heroes Saga Mod Apk</h1>
- <p>Do you love playing farm-themed games? Do you want to enjoy a fun and relaxing puzzle game with cute animals and crops? If yes, then you should try Farm Heroes Saga, one of the most popular games in the Saga series. And if you want to make your gaming experience even more exciting, you should download the latest version of the Farm Heroes Saga mod apk, which gives you unlimited lives, boosters, and access to all levels and episodes. In this article, we will tell you everything you need to know about Farm Heroes Saga and its mod apk version. Read on to find out more.</p>
- <h2>download farm heroes saga mod apk versi terbaru</h2><br /><p><b><b>Download</b> &#8230;&#8230;&#8230; <a href="https://urlin.us/2uSYMD">https://urlin.us/2uSYMD</a></b></p><br /><br />
- <h2>What is Farm Heroes Saga?</h2>
- <p>Farm Heroes Saga is a fascinating farm-themed game developed by King, the makers of Candy Crush Saga, Pet Rescue Saga, and other popular games. It is the latest game in the Saga series. The gameplay style of the game does not change much from its predecessors. You have to match three or more cropsies (fruits, vegetables, flowers, etc.) of the same type to collect them and complete the level objectives. You can also use boosters and power-ups to help you clear the board faster and score higher. The game has hundreds of levels and episodes, each with different challenges and themes. You can also play with your friends online and compete for the best scores.</p>
- <h3>Features of Farm Heroes Saga</h3>
- <p>Some of the features of Farm Heroes Saga are:</p>
- <ul>
- <li>Cute and colorful graphics and animations</li>
- <li>Easy and fun to play, but challenging to master</li>
- <li>Hundreds of levels and episodes with different objectives and scenarios</li>
- <li>Various cropsies and farm animals to collect and interact with</li>
- <li>Boosters and power-ups to enhance your gameplay</li>
- <li>Leaderboards and achievements to track your progress and compete with your friends</li>
- <li>Regular updates with new levels, events, and features</li>
- </ul>
- <h3>Why download Farm Heroes Saga mod apk?</h3>
- <p>While Farm Heroes Saga is a free-to-play game, it also has some limitations and drawbacks that can affect your gaming experience. For example, you have a limited number of lives that you can use per day. If you run out of lives, you have to wait for some time or buy more lives with real money. Similarly, you have to buy boosters and power-ups with gold bars, which are also scarce and expensive. Moreover, some levels and episodes are locked until you reach a certain level or complete a certain task. And of course, there are annoying ads and pop-ups that can interrupt your gameplay.</p>
- <p>If you want to get rid of these problems and enjoy the game without any restrictions, you should download the latest version of the Farm Heroes Saga mod apk. This is a modified version of the original game that gives you several benefits and advantages. Here are some of them:</p>
- <h4>Unlimited lives and boosters</h4>
- <p>With Farm Heroes Saga mod apk, you don't have to worry about running out of lives or boosters ever again. You can play as much as you want without any interruptions or delays. You can also use any booster or power-up you like without spending any gold bars or money.</p>
- <p>farm heroes saga mod apk unlimited lives and boosters<br />
- farm heroes saga mod apk latest version 2023<br />
- farm heroes saga mod apk android 1<br />
- farm heroes saga mod apk free download<br />
- farm heroes saga mod apk offline<br />
- farm heroes saga mod apk unlimited gold bars<br />
- farm heroes saga mod apk unlimited everything<br />
- farm heroes saga mod apk rexdl<br />
- farm heroes saga mod apk revdl<br />
- farm heroes saga mod apk happymod<br />
- farm heroes saga mod apk no root<br />
- farm heroes saga mod apk unlimited moves<br />
- farm heroes saga mod apk unlimited beans<br />
- farm heroes saga mod apk all levels unlocked<br />
- farm heroes saga mod apk unlimited magic beans and gold bars<br />
- farm heroes saga mod apk 6.15.3<br />
- farm heroes saga mod apk 6.14.5<br />
- farm heroes saga mod apk 6.13.8<br />
- farm heroes saga mod apk 6.12.9<br />
- farm heroes saga mod apk 6.11.6<br />
- download farm heroes saga hack mod apk<br />
- download game farm heroes saga mod apk versi terbaru<br />
- download game farm heroes saga mod apk unlimited money<br />
- download game farm heroes saga mod apk android 1<br />
- download game farm heroes saga mod apk offline<br />
- download game farm heroes saga hack mod apk<br />
- download game farm heroes super saga mod apk versi terbaru<br />
- download game farm heroes super saga mod apk unlimited money<br />
- download game farm heroes super saga hack mod apk<br />
- download game pet rescue saga mod apk versi terbaru<br />
- download game pet rescue saga hack mod apk<br />
- download game candy crush soda saga mod apk versi terbaru<br />
- download game candy crush soda saga hack mod apk<br />
- download game candy crush jelly saga mod apk versi terbaru<br />
- download game candy crush jelly saga hack mod apk<br />
- download game candy crush friends saga mod apk versi terbaru<br />
- download game candy crush friends saga hack mod apk<br />
- cara download farm heroes saga mod apk versi terbaru<br />
- cara instal farm heroes saga mod apk versi terbaru<br />
- cara main farm heroes saga mod apk versi terbaru<br />
- cara cheat farm heroes saga mod apk versi terbaru<br />
- cara update farm heroes saga mod apk versi terbaru<br />
- link download farm heroes saga mod apk versi terbaru 2023<br />
- link download game farm heroes super saga hack mod apk 2023</p>
- <h4>All levels and episodes unlocked</h4>
- <p>With Farm Heroes Saga mod apk, you don't have to wait for anything or do anything to unlock new levels and episodes. You can access all of them from the start and play them in any order you prefer. You can also skip any level or episode that you find too hard or boring.</p>
- <h4>No ads and pop-ups</h4>
- <p>With Farm Heroes Saga mod apk, you don't have to deal with any ads or pop-ups that can ruin your mood and distract you from the game. You can enjoy smooth and uninterrupted gameplay without any annoying distractions.</p>
- <h2>How to download and install Farm Heroes Saga mod apk?</h2>
- <p>Downloading and installing Farm Heroes Saga mod apk is simple. You just need to follow these steps:</p>
- <h3>Step 1: Enable unknown sources</h3>
- <p>Before you can install any mod apk file on your device, you need to enable the option of unknown sources in your settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on.</p>
- <h3>Step 2: Download the mod apk file</h3>
- <p>Next, you need to download the Farm Heroes Saga mod apk file from a reliable and trusted source. You can use the link below to download the latest version of the mod apk file. Make sure you have enough storage space on your device before downloading the file.</p>
- <p><a href="">Download the latest version of the Farm Heroes Saga mod apk</a></p>
- <h3>Step 3: Install the mod apk file</h3>
- <p>Once you have downloaded the mod apk file, you need to install it on your device. To do this, locate the file in your downloads folder and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on install and wait for the process to finish.</p>
- <h3>Step 4: Launch the game and enjoy</h3>
- <p>Finally, you can launch the game and enjoy all the benefits and features of the mod apk version. You will see that you have unlimited lives, boosters, and access to all levels and episodes. You can also play without any ads or pop-ups. Have fun!</p>
- <h2>Conclusion</h2>
- <p>Farm Heroes Saga is a fun and relaxing farm-themed game that you can play anytime and anywhere. It has cute and colorful graphics, easy and fun gameplay, and hundreds of levels and episodes to keep you entertained. However, if you want to make your gaming experience even more exciting and enjoyable, you should download the latest version of the Farm Heroes Saga mod apk, which gives you unlimited lives, boosters, and access to all levels and episodes. You can also play without any ads or pop-ups. Downloading and installing Farm Heroes Saga mod apk is simple. You just need to follow the steps we have explained in this article. So what are you waiting for? Download Farm Heroes Saga mod apk now and have fun!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Farm Heroes Saga mod apk:</p>
- <ul>
- <li><b>Is Farm Heroes Saga mod apk safe to use?</b></li>
- <p>Yes, Farm Heroes Saga mod apk is safe to use as long as you download it from a reliable and trusted source. We have tested the mod apk file ourselves and found no viruses or malware in it. However, we recommend that you always scan any file before installing it on your device.</p>
- <li><b>Will I get banned for using Farm Heroes Saga mod apk?</b></li>
- <p>No, you will not get banned for using Farm Heroes Saga mod apk as long as you use it wisely and responsibly. Do not abuse the unlimited lives and boosters or try to cheat in online mode. Also, do not share your account details with anyone else or use multiple accounts on the same device.</p>
- <li><b>Can I update Farm Heroes Saga mod apk?</b></li>
- <p>No, you cannot update Farm Heroes Saga mod apk as it is a modified version of the original game. If you try to update it from the Google Play Store, you will lose all the benefits and features of the mod apk version. If you want to update the game, you will have to download and install the latest version of the mod apk file from a new source.</p>
- <li><b>Can I play Farm Heroes Saga mod apk offline?</b></li>
- <p>Yes, you can play Farm Heroes Saga mod apk offline as long as you have already downloaded the game data on your device. However, some features and functions may not work properly in offline mode. For example, you may not be able to sync your progress with your Facebook account or compete with your friends online.</p>
- <li><b>Can I play Farm Heroes Saga mod apk on PC?</b></li>
- <p>Yes, you can play Farm Heroes Saga mod apk on PC using an Android emulator such as Bluestacks or Nox Player. These are software programs that allow you to run Android apps on your PC. However, you may experience some lag or performance issues depending on your PC specifications and internet connection. You may also need to adjust some settings and controls to optimize your gameplay.</p>
- <p>I hope this article has helped you learn more about Farm Heroes Saga and its mod apk version. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p> 197e85843d<br />
- <br />
spaces/1phancelerku/anime-remove-background/B-Project Kaikan Everyday - The Otome Game Youve Been Waiting For.md DELETED
@@ -1,93 +0,0 @@
- <br />
- <h1>B Project Game Download: How to Enjoy the Idol Project on Your Device</h1>
- <p>If you are a fan of idols and otome games, you might have heard of B Project, a cross-media project by MAGES that features four idol groups with different styles and stories. The project has various media adaptations and merchandise, but one of the most anticipated ones is the first consumer game, B Project Meteor Fantasia. In this article, we will tell you what B Project is, what B Project Meteor Fantasia is, and how to download it on your device.</p>
- <h2>b project game download</h2><br /><p><b><b>Download</b> &#10040;&#10040;&#10040; <a href="https://jinyurl.com/2uNSJD">https://jinyurl.com/2uNSJD</a></b></p><br /><br />
- <h2>What is B Project?</h2>
- <p>B Project is a Japanese cross-media project by MAGES that started in 2015. The project revolves around four male idol groups that perform both as separate units and as a whole group. They are:</p>
- <ul>
- <li>Kitakore, a duo that combines pop and rock elements</li>
- <li>THRIVE, a trio that specializes in dance and rap music</li>
- <li>MooNs, a five-member group that has a wide range of genres and skills</li>
- <li>KiLLER KiNG, a four-member group that debuted later than the others and has a cute and energetic image</li>
- </ul>
- <p>The project follows the lives and struggles of these idols as they aim for the top of the industry. The project also features Tsubasa Sumisora, an A&R who works with them and supports them.</p>
- <p>The project has been developed in various media, such as anime, manga, rhythm game, stage play, music CDs, and related merchandise. The project has a large fan base both in Japan and overseas.</p>
- <h2>What is B Project Meteor Fantasia?</h2>
- <p>B Project Meteor Fantasia is the first consumer game of the project. It was released on February 10, 2022, on Switch, iOS, and Android platforms. The game features a new scenario with new songs and full voice by the cast of the anime series.</p>
- <p>The game focuses on the past five years of B Project, revealing their unknown secrets and hidden sides. The game also includes the story of their trainee period and some popular chapters from the previous mobile game, B Project Kaikan Everyday.</p>
- <p>One of the unique features of the game is the smartphone trigger system, which allows you to interact with the idols through text messages and phone calls. Depending on your choices, you can change the course of the story and unlock different endings. You can also increase your intimacy with the idols and access their epilogues.</p>
- <p>b project game download android<br />
- b project game download apk<br />
- b project game download ios<br />
- b project game download switch<br />
- b project game download pc<br />
- b project game download english<br />
- b project game download free<br />
- b project game download qooapp<br />
- b project game download reddit<br />
- b project game download review<br />
- b project ryuusei fantasia game download<br />
- b project kaikan everyday game download<br />
- b project meteor fantasia game download<br />
- b project idol game download<br />
- b project rhythm game download<br />
- b project otome game download<br />
- b project mobile game download<br />
- b project nintendo switch game download<br />
- b project live2d game download<br />
- b project voice actors game download<br />
- how to download b project game<br />
- where to download b project game<br />
- is b project game available for download<br />
- can i download b project game on iphone<br />
- can i download b project game on windows 10<br />
- best site to download b project game<br />
- best way to download b project game<br />
- tips and tricks for downloading b project game<br />
- how to play b project game after downloading<br />
- how to update b project game after downloading<br />
- how to install b project game on android<br />
- how to install b project game on ios<br />
- how to install b project game on switch<br />
- how to install b project game on pc<br />
- how to install b project game in english<br />
- how to uninstall b project game from android<br />
- how to uninstall b project game from ios<br />
- how to uninstall b project game from switch<br />
- how to uninstall b project game from pc<br />
- how to backup and restore b project game data<br />
- what is the size of b project game download file<br />
- what is the price of b project game download file<br />
- what is the rating of b project game download file<br />
- what is the genre of b project game download file<br />
- what is the release date of b project game download file</p>
- <h2>How to download B Project Meteor Fantasia?</h2>
- <p>If you want to play B Project Meteor Fantasia on your device, you need to follow these steps:</p>
- <ol>
- <li>First, you need to have a Japanese account for your platform. For Switch, you need to create a new Nintendo account with Japan as your region. For iOS, you need to create a new Apple ID with Japan as your country or region. For Android, you need to use a VPN app to change your location to Japan.</li>
- <li>Second, you need to purchase the game from your platform store. For Switch, you need to buy a physical copy or a digital code from a Japanese online store, such as Amazon Japan. For iOS and Android, you need to buy the game from the Japanese App Store or Google Play Store using a Japanese payment method, such as a gift card or a credit card.</li>
- <li>Third, you need to install the game on your device and enjoy it. The game is fully voiced in Japanese, but you can find some fan translations and guides online if you need help. The game also has an auto-save function and a skip function, so you can play at your own pace.</li>
- </ol>
- <p>Some tips and tricks to enjoy the game are:</p>
- <ul>
- <li>Play the prologue first to get an overview of the story and the characters.</li>
- <li>Choose the idol group or the idol you like the most and follow their route. You can also switch between different routes if you want to see different perspectives.</li>
- <li>Use the smartphone trigger system wisely. You can send messages and call the idols at certain points in the story, but you have a limited number of triggers. You can also receive messages and calls from them randomly, so check your phone often.</li>
- <li>Listen to the new songs and watch the live performances. The game has 16 new songs and 4 live stages that showcase the talents and charms of the idols.</li>
- <li>Collect the CGs and the epilogues. The game has 80 CGs and 16 epilogues that you can unlock by completing the routes and increasing your intimacy with the idols. You can view them in the gallery mode.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>B Project Meteor Fantasia is a game that lets you enjoy the idol project on your device. It is a game that has a new scenario, new songs, full voice, and a smartphone trigger system that lets you interact with the idols. It is a game that is available on Switch, iOS, and Android platforms, but it requires a Japanese account and a Japanese payment method to purchase it. It is a game that is recommended for fans of idols and otome games who want to experience the past five years of B Project.</p>
- <h2>FAQs</h2>
- <h3>Who is the general producer of B Project?</h3>
- <p>The general producer of B Project is T.M.Revolution, a famous singer and actor who also voices one of the characters in the project, Hikaru Osari from KiLLER KiNG.</p>
- <h3>How many idols are there in B Project?</h3>
- <p>There are 14 idols in B Project, divided into four groups: Kitakore (2 members), THRIVE (3 members), MooNs (5 members), and KiLLER KiNG (4 members).</p>
- <h3>What are the names of the four idol groups in B Project?</h3>
- <p>The names of the four idol groups in B Project are Kitakore, THRIVE, MooNs, and KiLLER KiNG. They are all named after words that start with B: Kitakore means "north core", THRIVE means "thrive", MooNs means "moons", and KiLLER KiNG means "killer king".</p>
- <h3>How many epilogues are there in B Project Meteor Fantasia?</h3>
- <p>There are 16 epilogues in B Project Meteor Fantasia, one for each idol. You can unlock them by completing their routes and increasing your intimacy with them.</p>
- <h3>Where can I find more information about B Project?</h3>
- <p>You can find more information about B Project on their official website, their official Twitter account, their official YouTube channel, or their official fan club. You can also find some fan communities and resources online, such as Reddit, Tumblr, Discord, or Wikia.</p> 197e85843d<br />
- <br />
- <br />
spaces/1toTree/lora_test/ppdiffusers/utils/testing_utils.py DELETED
@@ -1,409 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- import logging
- import os
- import random
- import re
- import unittest
- import urllib.parse
- from io import BytesIO, StringIO
- from pathlib import Path
- from typing import Union
-
- import numpy as np
- import PIL.Image
- import PIL.ImageOps
- import requests
-
- from paddlenlp.trainer.argparser import strtobool
-
- from .import_utils import is_fastdeploy_available, is_paddle_available
-
- if is_paddle_available():
-     import paddle
-
- global_rng = random.Random()
-
-
- def image_grid(imgs, rows, cols):
-     assert len(imgs) == rows * cols
-     w, h = imgs[0].size
-     grid = PIL.Image.new("RGB", size=(cols * w, rows * h))
-
-     for i, img in enumerate(imgs):
-         grid.paste(img, box=(i % cols * w, i // cols * h))
-     return grid
-
-
- def paddle_all_close(a, b, *args, **kwargs):
-     if not is_paddle_available():
-         raise ValueError("Paddle needs to be installed to use this function.")
-
-     if not paddle.allclose(a, b, *args, **kwargs):
-         assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
-     return True
-
-
- def get_tests_dir(append_path=None):
-     """
-     Args:
-         append_path: optional path to append to the tests dir path
-     Return:
-         The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
-         joined after the `tests` dir if the former is provided.
-     """
-     # this function caller's __file__
-     caller__file__ = inspect.stack()[1][1]
-     tests_dir = os.path.abspath(os.path.dirname(caller__file__))
-
-     while not tests_dir.endswith("tests"):
-         tests_dir = os.path.dirname(tests_dir)
-
-     if append_path:
-         return os.path.join(tests_dir, append_path)
-     else:
-         return tests_dir
-
-
- def parse_flag_from_env(key, default=False):
-     try:
-         value = os.environ[key]
-     except KeyError:
-         # KEY isn't set, default to `default`.
-         _value = default
-     else:
-         # KEY is set, convert it to True or False.
-         try:
-             _value = strtobool(value)
-         except ValueError:
-             # More values are supported, but let's keep the message simple.
-             raise ValueError(f"If set, {key} must be yes or no.")
-     return _value
-
-
- _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
- _run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
-
-
- def floats_tensor(shape, scale=1.0, rng=None, name=None):
-     """Creates a random float32 tensor"""
-     if rng is None:
-         rng = global_rng
-
-     total_dims = 1
-     for dim in shape:
-         total_dims *= dim
-
-     values = []
-     for _ in range(total_dims):
-         values.append(rng.random() * scale)
-
-     return paddle.to_tensor(data=values, dtype=paddle.float32).reshape(shape)
-
-
- def slow(test_case):
-     """
-     Decorator marking a test as slow.
-
-     Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
-
-     """
-     return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
-
-
- def require_paddle(test_case):
-     """
-     Decorator marking a test that requires Paddle. These tests are skipped when Paddle isn't installed.
-     """
-     return unittest.skipUnless(is_paddle_available(), "test requires Paddle")(test_case)
-
-
- def nightly(test_case):
-     """
-     Decorator marking a test that runs nightly in the diffusers CI.
-     Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
-     """
-     return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
-
-
- def require_fastdeploy(test_case):
-     """
-     Decorator marking a test that requires fastdeploy. These tests are skipped when fastdeploy isn't installed.
-     """
-     return unittest.skipUnless(is_fastdeploy_available(), "test requires fastdeploy")(test_case)
-
-
- def load_numpy(arry: Union[str, np.ndarray]) -> np.ndarray:
-     if isinstance(arry, str):
-         if arry.startswith("http://") or arry.startswith("https://"):
-             response = requests.get(arry)
-             response.raise_for_status()
-             arry = np.load(BytesIO(response.content))
-         elif os.path.isfile(arry):
-             arry = np.load(arry)
-         else:
-             raise ValueError(
-                 f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path"
-             )
-     elif isinstance(arry, np.ndarray):
-         pass
-     else:
-         raise ValueError(
-             "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a"
-             " ndarray."
-         )
-
-     return arry
-
-
- def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
-     """
-     Loads `image` to a PIL Image.
-
-     Args:
-         image (`str` or `PIL.Image.Image`):
-             The image to convert to the PIL Image format.
-     Returns:
-         `PIL.Image.Image`: A PIL Image.
-     """
-     if isinstance(image, str):
-         if image.startswith("http://") or image.startswith("https://"):
-             image = PIL.Image.open(requests.get(image, stream=True).raw)
-         elif os.path.isfile(image):
-             image = PIL.Image.open(image)
-         else:
-             raise ValueError(
-                 f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
-             )
-     elif isinstance(image, PIL.Image.Image):
-         image = image
-     else:
-         raise ValueError(
-             "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
-         )
-     image = PIL.ImageOps.exif_transpose(image)
-     image = image.convert("RGB")
-     return image
-
-
- def load_hf_numpy(path) -> np.ndarray:
-     if not (path.startswith("http://") or path.startswith("https://")):
-         path = os.path.join(
-             "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main", urllib.parse.quote(path)
-         )
-
-     return load_numpy(path)
-
-
- def load_ppnlp_numpy(path) -> np.ndarray:
-     if not (path.startswith("http://") or path.startswith("https://")):
-         path = os.path.join(
-             "https://paddlenlp.bj.bcebos.com/models/community/CompVis/data/diffusers-testing", urllib.parse.quote(path)
-         )
-     return load_numpy(path)
-
-
- # --- pytest conf functions --- #
-
- # to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
- pytest_opt_registered = {}
-
-
- def pytest_addoption_shared(parser):
-     """
-     This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
-
-     It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
-     option.
-
-     """
-     option = "--make-reports"
-     if option not in pytest_opt_registered:
-         parser.addoption(
-             option,
-             action="store",
-             default=False,
-             help="generate report files. The value of this option is used as a prefix to report names",
-         )
-         pytest_opt_registered[option] = 1
-
-
- def pytest_terminal_summary_main(tr, id):
-     """
-     Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
-     directory. The report files are prefixed with the test suite name.
-
-     This function emulates --duration and -rA pytest arguments.
-
-     This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
-     there.
-
-     Args:
-     - tr: `terminalreporter` passed from `conftest.py`
-     - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
-       needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
-
-     NB: this functions taps into a private _pytest API and while unlikely, it could break should
-     pytest do internal changes - also it calls default internal methods of terminalreporter which
-     can be hijacked by various `pytest-` plugins and interfere.
-
-     """
-     from _pytest.config import create_terminal_writer
-
-     if not len(id):
-         id = "tests"
-
-     config = tr.config
-     orig_writer = config.get_terminal_writer()
-     orig_tbstyle = config.option.tbstyle
-     orig_reportchars = tr.reportchars
-
-     dir = "reports"
-     Path(dir).mkdir(parents=True, exist_ok=True)
-     report_files = {
-         k: f"{dir}/{id}_{k}.txt"
-         for k in [
-             "durations",
-             "errors",
-             "failures_long",
-             "failures_short",
-             "failures_line",
-             "passes",
-             "stats",
-             "summary_short",
-             "warnings",
-         ]
-     }
-
-     # custom durations report
-     # note: there is no need to call pytest --durations=XX to get this separate report
-     # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
-     dlist = []
-     for replist in tr.stats.values():
-         for rep in replist:
-             if hasattr(rep, "duration"):
-                 dlist.append(rep)
-     if dlist:
-         dlist.sort(key=lambda x: x.duration, reverse=True)
-         with open(report_files["durations"], "w") as f:
-             durations_min = 0.05  # sec
-             f.write("slowest durations\n")
-             for i, rep in enumerate(dlist):
-                 if rep.duration < durations_min:
-                     f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
-                     break
-                 f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
-
-     def summary_failures_short(tr):
-         # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
-         reports = tr.getreports("failed")
-         if not reports:
-             return
-         tr.write_sep("=", "FAILURES SHORT STACK")
-         for rep in reports:
-             msg = tr._getfailureheadline(rep)
-             tr.write_sep("_", msg, red=True, bold=True)
-             # chop off the optional leading extra frames, leaving only the last one
-             longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
-             tr._tw.line(longrepr)
-         # note: not printing out any rep.sections to keep the report short
-
-     # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
-     # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
-     # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
-     # pytest-instafail does that)
-
-     # report failures with line/short/long styles
-     config.option.tbstyle = "auto"  # full tb
-     with open(report_files["failures_long"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.summary_failures()
-
-     # config.option.tbstyle = "short" # short tb
-     with open(report_files["failures_short"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         summary_failures_short(tr)
-
-     config.option.tbstyle = "line"  # one line per error
-     with open(report_files["failures_line"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.summary_failures()
-
-     with open(report_files["errors"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.summary_errors()
-
-     with open(report_files["warnings"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.summary_warnings()  # normal warnings
-         tr.summary_warnings()  # final warnings
-
-     tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())
-     with open(report_files["passes"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.summary_passes()
-
-     with open(report_files["summary_short"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.short_test_summary()
-
-     with open(report_files["stats"], "w") as f:
-         tr._tw = create_terminal_writer(config, f)
-         tr.summary_stats()
-
-     # restore:
-     tr._tw = orig_writer
-     tr.reportchars = orig_reportchars
-     config.option.tbstyle = orig_tbstyle
-
-
- class CaptureLogger:
-     """
-     Context manager to capture `logging` streams
-
-     Args:
-         logger: `logging` logger object
-     Returns:
-         The captured output is available via `self.out`
-     Example:
-     ```python
-     >>> from ppdiffusers import logging
-     >>> from ppdiffusers.testing_utils import CaptureLogger
-
-     >>> msg = "Testing 1, 2, 3"
-     >>> logging.set_verbosity_info()
-     >>> logger = logging.get_logger("ppdiffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py")
-     >>> with CaptureLogger(logger) as cl:
-     ...     logger.info(msg)
-     >>> assert cl.out, msg + "\n"
-     ```
-     """
-
-     def __init__(self, logger):
-         self.logger = logger
-         self.io = StringIO()
-         self.sh = logging.StreamHandler(self.io)
-         self.out = ""
-
-     def __enter__(self):
-         self.logger.addHandler(self.sh)
-         return self
-
-     def __exit__(self, *exc):
-         self.logger.removeHandler(self.sh)
-         self.out = self.io.getvalue()
-
-     def __repr__(self):
-         return f"captured: {self.out}\n"
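The docstrings of pytest_addoption_shared and pytest_terminal_summary_main above say they must be wired up from a conftest.py. A minimal sketch of that wiring, assuming the module is importable as ppdiffusers.utils.testing_utils (this conftest.py is not part of the deleted file itself):

    # conftest.py sketch: registers --make-reports and writes the report files.
    from ppdiffusers.utils.testing_utils import (
        pytest_addoption_shared,
        pytest_terminal_summary_main,
    )

    def pytest_addoption(parser):
        # guarded internally so the option is only registered once across conftest files
        pytest_addoption_shared(parser)

    def pytest_terminal_summary(terminalreporter):
        make_reports = terminalreporter.config.getoption("--make-reports")
        if make_reports:
            # writes durations/failures/passes/... files under reports/, prefixed by the id
            pytest_terminal_summary_main(terminalreporter, id=make_reports)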
spaces/232labs/VToonify/vtoonify/model/raft/core/raft.py DELETED
@@ -1,144 +0,0 @@
- import numpy as np
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
- from model.raft.core.update import BasicUpdateBlock, SmallUpdateBlock
- from model.raft.core.extractor import BasicEncoder, SmallEncoder
- from model.raft.core.corr import CorrBlock, AlternateCorrBlock
- from model.raft.core.utils.utils import bilinear_sampler, coords_grid, upflow8
-
- try:
-     autocast = torch.cuda.amp.autocast
- except:
-     # dummy autocast for PyTorch < 1.6
-     class autocast:
-         def __init__(self, enabled):
-             pass
-         def __enter__(self):
-             pass
-         def __exit__(self, *args):
-             pass
-
-
- class RAFT(nn.Module):
-     def __init__(self, args):
-         super(RAFT, self).__init__()
-         self.args = args
-
-         if args.small:
-             self.hidden_dim = hdim = 96
-             self.context_dim = cdim = 64
-             args.corr_levels = 4
-             args.corr_radius = 3
-
-         else:
-             self.hidden_dim = hdim = 128
-             self.context_dim = cdim = 128
-             args.corr_levels = 4
-             args.corr_radius = 4
-
-         if 'dropout' not in self.args:
-             self.args.dropout = 0
-
-         if 'alternate_corr' not in self.args:
-             self.args.alternate_corr = False
-
-         # feature network, context network, and update block
-         if args.small:
-             self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
-             self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
-             self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
-
-         else:
-             self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
-             self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
-             self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
-
-     def freeze_bn(self):
-         for m in self.modules():
-             if isinstance(m, nn.BatchNorm2d):
-                 m.eval()
-
-     def initialize_flow(self, img):
-         """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
-         N, C, H, W = img.shape
-         coords0 = coords_grid(N, H//8, W//8, device=img.device)
-         coords1 = coords_grid(N, H//8, W//8, device=img.device)
-
-         # optical flow computed as difference: flow = coords1 - coords0
-         return coords0, coords1
-
-     def upsample_flow(self, flow, mask):
-         """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
-         N, _, H, W = flow.shape
-         mask = mask.view(N, 1, 9, 8, 8, H, W)
-         mask = torch.softmax(mask, dim=2)
-
-         up_flow = F.unfold(8 * flow, [3,3], padding=1)
-         up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
-
-         up_flow = torch.sum(mask * up_flow, dim=2)
-         up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
-         return up_flow.reshape(N, 2, 8*H, 8*W)
-
-
-     def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
-         """ Estimate optical flow between pair of frames """
-
-         image1 = 2 * (image1 / 255.0) - 1.0
-         image2 = 2 * (image2 / 255.0) - 1.0
-
-         image1 = image1.contiguous()
-         image2 = image2.contiguous()
-
-         hdim = self.hidden_dim
-         cdim = self.context_dim
-
-         # run the feature network
-         with autocast(enabled=self.args.mixed_precision):
-             fmap1, fmap2 = self.fnet([image1, image2])
-
-         fmap1 = fmap1.float()
-         fmap2 = fmap2.float()
-         if self.args.alternate_corr:
-             corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
-         else:
-             corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
-
-         # run the context network
-         with autocast(enabled=self.args.mixed_precision):
-             cnet = self.cnet(image1)
-             net, inp = torch.split(cnet, [hdim, cdim], dim=1)
-             net = torch.tanh(net)
-             inp = torch.relu(inp)
-
-         coords0, coords1 = self.initialize_flow(image1)
-
-         if flow_init is not None:
-             coords1 = coords1 + flow_init
-
-         flow_predictions = []
-         for itr in range(iters):
-             coords1 = coords1.detach()
-             corr = corr_fn(coords1)  # index correlation volume
-
-             flow = coords1 - coords0
-             with autocast(enabled=self.args.mixed_precision):
-                 net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
-
-             # F(t+1) = F(t) + \Delta(t)
-             coords1 = coords1 + delta_flow
-
-             # upsample predictions
-             if up_mask is None:
-                 flow_up = upflow8(coords1 - coords0)
-             else:
-                 flow_up = self.upsample_flow(coords1 - coords0, up_mask)
-
-             flow_predictions.append(flow_up)
-
-         if test_mode:
-             return coords1 - coords0, flow_up
-
-         return flow_predictions
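For a shape-level check of the forward pass above, a minimal sketch with randomly initialized weights and synthetic frames (assuming the model/raft/core package is importable; outputs are meaningless without trained weights):

    # Run RAFT on synthetic frames; H and W must be divisible by 8
    # because flow is estimated at 1/8 resolution and then upsampled.
    import argparse
    import torch

    args = argparse.Namespace(small=False, mixed_precision=False)
    model = RAFT(args).eval()  # 'dropout' and 'alternate_corr' get defaults in __init__

    image1 = torch.randint(0, 256, (1, 3, 384, 512)).float()  # pixel values in [0, 255]
    image2 = torch.randint(0, 256, (1, 3, 384, 512)).float()
    with torch.no_grad():
        flow_low, flow_up = model(image1, image2, iters=20, test_mode=True)
    print(flow_low.shape, flow_up.shape)  # (1, 2, 48, 64) and (1, 2, 384, 512)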
spaces/716this/review-star-prediction-app/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Review Star Prediction App
- emoji: 📚
- colorFrom: indigo
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.15.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
- Testing
spaces/AIConsultant/MusicGen/audiocraft/solvers/compression.py DELETED
@@ -1,328 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- import logging
- import multiprocessing
- from pathlib import Path
- import typing as tp
-
- import flashy
- import omegaconf
- import torch
- from torch import nn
-
- from . import base, builders
- from .. import models, quantization
- from ..utils import checkpoint
- from ..utils.samples.manager import SampleManager
- from ..utils.utils import get_pool_executor
-
-
- logger = logging.getLogger(__name__)
-
-
- class CompressionSolver(base.StandardSolver):
-     """Solver for compression task.
-
-     The compression task combines a set of perceptual and objective losses
-     to train an EncodecModel (composed of an encoder-decoder and a quantizer)
-     to perform high fidelity audio reconstruction.
-     """
-     def __init__(self, cfg: omegaconf.DictConfig):
-         super().__init__(cfg)
-         self.rng: torch.Generator  # set at each epoch
-         self.adv_losses = builders.get_adversarial_losses(self.cfg)
-         self.aux_losses = nn.ModuleDict()
-         self.info_losses = nn.ModuleDict()
-         assert not cfg.fsdp.use, "FSDP not supported by CompressionSolver."
-         loss_weights = dict()
-         for loss_name, weight in self.cfg.losses.items():
-             if loss_name in ['adv', 'feat']:
-                 for adv_name, _ in self.adv_losses.items():
-                     loss_weights[f'{loss_name}_{adv_name}'] = weight
-             elif weight > 0:
-                 self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)
-                 loss_weights[loss_name] = weight
-             else:
-                 self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)
-         self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)
-         self.register_stateful('adv_losses')
-
-     @property
-     def best_metric_name(self) -> tp.Optional[str]:
-         # best model is the last for the compression model
-         return None
-
-     def build_model(self):
-         """Instantiate model and optimizer."""
-         # Model and optimizer
-         self.model = models.builders.get_compression_model(self.cfg).to(self.device)
-         self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)
-         self.register_stateful('model', 'optimizer')
-         self.register_best_state('model')
-         self.register_ema('model')
-
-     def build_dataloaders(self):
-         """Instantiate audio dataloaders for each stage."""
-         self.dataloaders = builders.get_audio_datasets(self.cfg)
-
-     def show(self):
-         """Show the compression model and employed adversarial loss."""
-         self.logger.info(f"Compression model with {self.model.quantizer.total_codebooks} codebooks:")
-         self.log_model_summary(self.model)
-         self.logger.info("Adversarial loss:")
-         self.log_model_summary(self.adv_losses)
-         self.logger.info("Auxiliary losses:")
-         self.logger.info(self.aux_losses)
-         self.logger.info("Info losses:")
-         self.logger.info(self.info_losses)
-
-     def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):
-         """Perform one training or valid step on a given batch."""
-         x = batch.to(self.device)
-         y = x.clone()
-
-         qres = self.model(x)
-         assert isinstance(qres, quantization.QuantizedResult)
-         y_pred = qres.x
-         # Log bandwidth in kb/s
-         metrics['bandwidth'] = qres.bandwidth.mean()
-
-         if self.is_training:
-             d_losses: dict = {}
-             if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:
-                 for adv_name, adversary in self.adv_losses.items():
-                     disc_loss = adversary.train_adv(y_pred, y)
-                     d_losses[f'd_{adv_name}'] = disc_loss
-                 metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))
-             metrics.update(d_losses)
-
-         balanced_losses: dict = {}
-         other_losses: dict = {}
-
-         # penalty from quantization
-         if qres.penalty is not None and qres.penalty.requires_grad:
-             other_losses['penalty'] = qres.penalty  # penalty term from the quantizer
-
-         # adversarial losses
-         for adv_name, adversary in self.adv_losses.items():
-             adv_loss, feat_loss = adversary(y_pred, y)
-             balanced_losses[f'adv_{adv_name}'] = adv_loss
-             balanced_losses[f'feat_{adv_name}'] = feat_loss
-
-         # auxiliary losses
-         for loss_name, criterion in self.aux_losses.items():
-             loss = criterion(y_pred, y)
-             balanced_losses[loss_name] = loss
-
-         # weighted losses
-         metrics.update(balanced_losses)
-         metrics.update(other_losses)
-         metrics.update(qres.metrics)
-
-         if self.is_training:
-             # backprop losses that are not handled by balancer
-             other_loss = torch.tensor(0., device=self.device)
-             if 'penalty' in other_losses:
-                 other_loss += other_losses['penalty']
-             if other_loss.requires_grad:
-                 other_loss.backward(retain_graph=True)
-                 ratio1 = sum(p.grad.data.norm(p=2).pow(2)
-                              for p in self.model.parameters() if p.grad is not None)
-                 assert isinstance(ratio1, torch.Tensor)
-                 metrics['ratio1'] = ratio1.sqrt()
-
-             # balancer losses backward, returns effective training loss
-             # with effective weights at the current batch.
-             metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)
-             # add metrics corresponding to weight ratios
-             metrics.update(self.balancer.metrics)
-             ratio2 = sum(p.grad.data.norm(p=2).pow(2)
-                          for p in self.model.parameters() if p.grad is not None)
-             assert isinstance(ratio2, torch.Tensor)
-             metrics['ratio2'] = ratio2.sqrt()
-
-             # optim
-             flashy.distrib.sync_model(self.model)
-             if self.cfg.optim.max_norm:
-                 torch.nn.utils.clip_grad_norm_(
-                     self.model.parameters(), self.cfg.optim.max_norm
-                 )
-             self.optimizer.step()
-             self.optimizer.zero_grad()
-
-         # informative losses only
-         info_losses: dict = {}
-         with torch.no_grad():
-             for loss_name, criterion in self.info_losses.items():
-                 loss = criterion(y_pred, y)
-                 info_losses[loss_name] = loss
-
-         metrics.update(info_losses)
-
-         # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups
-         adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]
-         if len(adv_losses) > 0:
-             metrics['adv'] = torch.sum(torch.stack(adv_losses))
-         feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]
-         if len(feat_losses) > 0:
-             metrics['feat'] = torch.sum(torch.stack(feat_losses))
-
-         return metrics
-
-     def run_epoch(self):
-         # reset random seed at the beginning of the epoch
-         self.rng = torch.Generator()
-         self.rng.manual_seed(1234 + self.epoch)
-         # run epoch
-         super().run_epoch()
-
-     def evaluate(self):
-         """Evaluate stage. Runs audio reconstruction evaluation."""
-         self.model.eval()
-         evaluate_stage_name = str(self.current_stage)
-
-         loader = self.dataloaders['evaluate']
-         updates = len(loader)
-         lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)
-         average = flashy.averager()
-
-         pendings = []
-         ctx = multiprocessing.get_context('spawn')
-         with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:
-             for idx, batch in enumerate(lp):
-                 x = batch.to(self.device)
-                 with torch.no_grad():
-                     qres = self.model(x)
-
-                 y_pred = qres.x.cpu()
-                 y = batch.cpu()  # should already be on CPU but just in case
-                 pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))
-
-             metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)
-             for pending in metrics_lp:
-                 metrics = pending.result()
-                 metrics = average(metrics)
-
-         metrics = flashy.distrib.average_metrics(metrics, len(loader))
-         return metrics
-
-     def generate(self):
-         """Generate stage."""
-         self.model.eval()
-         sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)
-         generate_stage_name = str(self.current_stage)
-
-         loader = self.dataloaders['generate']
-         updates = len(loader)
-         lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)
-
-         for batch in lp:
-             reference, _ = batch
-             reference = reference.to(self.device)
-             with torch.no_grad():
-                 qres = self.model(reference)
-             assert isinstance(qres, quantization.QuantizedResult)
-
-             reference = reference.cpu()
-             estimate = qres.x.cpu()
-             sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)
-
-         flashy.distrib.barrier()
-
-     def load_from_pretrained(self, name: str) -> dict:
237
- model = models.CompressionModel.get_pretrained(name)
238
- if isinstance(model, models.DAC):
239
- raise RuntimeError("Cannot fine tune a DAC model.")
240
- elif isinstance(model, models.HFEncodecCompressionModel):
241
- self.logger.warning('Trying to automatically convert a HuggingFace model '
242
- 'to AudioCraft, this might fail!')
243
- state = model.model.state_dict()
244
- new_state = {}
245
- for k, v in state.items():
246
- if k.startswith('decoder.layers') and '.conv.' in k and '.block.' not in k:
247
- # We need to determine if this a convtr or a regular conv.
248
- layer = int(k.split('.')[2])
249
- if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):
250
-
251
- k = k.replace('.conv.', '.convtr.')
252
- k = k.replace('encoder.layers.', 'encoder.model.')
253
- k = k.replace('decoder.layers.', 'decoder.model.')
254
- k = k.replace('conv.', 'conv.conv.')
255
- k = k.replace('convtr.', 'convtr.convtr.')
256
- k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')
257
- k = k.replace('.codebook.', '._codebook.')
258
- new_state[k] = v
259
- state = new_state
260
- elif isinstance(model, models.EncodecModel):
261
- state = model.state_dict()
262
- else:
263
- raise RuntimeError(f"Cannot fine tune model type {type(model)}.")
264
- return {
265
- 'best_state': {'model': state}
266
- }
267
-
268
- @staticmethod
269
- def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],
270
- device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:
271
- """Instantiate a CompressionModel from a given checkpoint path or dora sig.
272
- This method is a convenient endpoint to load a CompressionModel to use in other solvers.
273
-
274
- Args:
275
- checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.
276
- This also supports pre-trained models by using a path of the form //pretrained/NAME.
277
- See `model_from_pretrained` for a list of supported pretrained models.
278
- use_ema (bool): Use EMA variant of the model instead of the actual model.
279
- device (torch.device or str): Device on which the model is loaded.
280
- """
281
- checkpoint_path = str(checkpoint_path)
282
- if checkpoint_path.startswith('//pretrained/'):
283
- name = checkpoint_path.split('/', 3)[-1]
284
- return models.CompressionModel.get_pretrained(name, device)
285
- logger = logging.getLogger(__name__)
286
- logger.info(f"Loading compression model from checkpoint: {checkpoint_path}")
287
- _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)
288
- assert _checkpoint_path is not None, f"Could not resolve compression model checkpoint path: {checkpoint_path}"
289
- state = checkpoint.load_checkpoint(_checkpoint_path)
290
- assert state is not None and 'xp.cfg' in state, f"Could not load compression model from ckpt: {checkpoint_path}"
291
- cfg = state['xp.cfg']
292
- cfg.device = device
293
- compression_model = models.builders.get_compression_model(cfg).to(device)
294
- assert compression_model.sample_rate == cfg.sample_rate, "Compression model sample rate should match"
295
-
296
- assert 'best_state' in state and state['best_state'] != {}
297
- assert 'exported' not in state, "When loading an exported checkpoint, use the //pretrained/ prefix."
298
- compression_model.load_state_dict(state['best_state']['model'])
299
- compression_model.eval()
300
- logger.info("Compression model loaded!")
301
- return compression_model
302
-
303
- @staticmethod
304
- def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,
305
- checkpoint_path: tp.Union[Path, str],
306
- device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:
307
- """Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.
308
-
309
- Args:
310
- cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.
311
- checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.
312
- use_ema (bool): Use EMA variant of the model instead of the actual model.
313
- device (torch.device or str): Device on which the model is loaded.
314
- """
315
- compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)
316
- compression_model = models.builders.get_wrapped_compression_model(compression_model, cfg)
317
- return compression_model
318
-
319
-
320
- def evaluate_audio_reconstruction(y_pred: torch.Tensor, y: torch.Tensor, cfg: omegaconf.DictConfig) -> dict:
321
- """Audio reconstruction evaluation method that can be conveniently pickled."""
322
- metrics = {}
323
- if cfg.evaluate.metrics.visqol:
324
- visqol = builders.get_visqol(cfg.metrics.visqol)
325
- metrics['visqol'] = visqol(y_pred, y, cfg.sample_rate)
326
- sisnr = builders.get_loss('sisnr', cfg)
327
- metrics['sisnr'] = sisnr(y_pred, y)
328
- return metrics
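For reference, a minimal sketch of how the loading endpoint above can be called from another solver. The module path and checkpoint argument are assumptions for illustration; `model_from_checkpoint` itself confirms that a `//pretrained/NAME` path is routed to `CompressionModel.get_pretrained`.

    # Hedged usage sketch; 'my_dora_sig_or_path' is a placeholder dora sig / path.
    from audiocraft.solvers.compression import CompressionSolver

    # A path of the form //pretrained/NAME would short-circuit to
    # CompressionModel.get_pretrained, as handled explicitly above.
    compression_model = CompressionSolver.model_from_checkpoint('my_dora_sig_or_path', device='cpu')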
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/zh_g2pM.py DELETED
@@ -1,72 +0,0 @@
- import re
- import jieba
- from pypinyin import pinyin, Style
- from data_gen.tts.data_gen_utils import PUNCS
- from data_gen.tts.txt_processors import zh
- from g2pM import G2pM
-
- ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j',
-               'q', 'x', 'r', 'z', 'c', 's', 'y', 'w']
- ALL_YUNMU = ['a', 'ai', 'an', 'ang', 'ao', 'e', 'ei', 'en', 'eng', 'er', 'i', 'ia', 'ian',
-              'iang', 'iao', 'ie', 'in', 'ing', 'iong', 'iu', 'ng', 'o', 'ong', 'ou',
-              'u', 'ua', 'uai', 'uan', 'uang', 'ui', 'un', 'uo', 'v', 'van', 've', 'vn']
-
-
- class TxtProcessor(zh.TxtProcessor):
-     model = G2pM()
-
-     @staticmethod
-     def sp_phonemes():
-         return ['|', '#']
-
-     @classmethod
-     def process(cls, txt, pre_align_args):
-         txt = cls.preprocess_text(txt)
-         ph_list = cls.model(txt, tone=pre_align_args['use_tone'], char_split=True)
-         seg_list = '#'.join(jieba.cut(txt))
-         assert len(ph_list) == len([s for s in seg_list if s != '#']), (ph_list, seg_list)
-
-         # insert word boundary markers '#'
-         ph_list_ = []
-         seg_idx = 0
-         for p in ph_list:
-             p = p.replace("u:", "v")
-             if seg_list[seg_idx] == '#':
-                 ph_list_.append('#')
-                 seg_idx += 1
-             else:
-                 ph_list_.append("|")
-             seg_idx += 1
-             if re.findall('[\u4e00-\u9fff]', p):
-                 if pre_align_args['use_tone']:
-                     p = pinyin(p, style=Style.TONE3, strict=True)[0][0]
-                     if p[-1] not in ['1', '2', '3', '4', '5']:
-                         p = p + '5'
-                 else:
-                     p = pinyin(p, style=Style.NORMAL, strict=True)[0][0]
-
-             finished = False
-             if len([c for c in p if c.isalpha()]) > 1:  # more than one alphabetic character
-                 for shenmu in ALL_SHENMU:
-                     if p.startswith(shenmu) and not p.lstrip(shenmu).isnumeric():
-                         ph_list_ += [shenmu, p.lstrip(shenmu)]
-                         finished = True
-                         break
-             if not finished:
-                 ph_list_.append(p)
-
-         ph_list = ph_list_
-
-         # remove word-boundary markers around silence symbols, e.g. [..., '#', ',', '#', ...]
-         sil_phonemes = list(PUNCS) + TxtProcessor.sp_phonemes()
-         ph_list_ = []
-         for i in range(len(ph_list)):
-             if ph_list[i] != '#' or (ph_list[i - 1] not in sil_phonemes and ph_list[i + 1] not in sil_phonemes):
-                 ph_list_.append(ph_list[i])
-         ph_list = ph_list_
-         return ph_list, txt
-
-
- if __name__ == '__main__':
-     phs, txt = TxtProcessor.process('他来到了,网易杭研大厦', {'use_tone': True})
-     print(phs)
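For readers unfamiliar with the initial/final (shengmu/yunmu) split performed inside `process` above, here is a minimal standalone sketch of that loop; the syllable 'zhong1' is a made-up example input, not taken from the file.

    ALL_SHENMU = ['zh', 'ch', 'sh', 'b', 'p', 'm', 'f', 'd', 't', 'n', 'l', 'g', 'k', 'h', 'j',
                  'q', 'x', 'r', 'z', 'c', 's', 'y', 'w']

    p = 'zhong1'  # hypothetical pinyin syllable with a tone number
    for shenmu in ALL_SHENMU:
        if p.startswith(shenmu) and not p.lstrip(shenmu).isnumeric():
            print([shenmu, p.lstrip(shenmu)])  # ['zh', 'ong1']
            break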
spaces/AIZero2HeroBootcamp/Memory/app.py DELETED
@@ -1,102 +0,0 @@
- import streamlit as st
- import pandas as pd
-
- # Define functions
- def create_empty_csv_files():
-     sem_df = pd.DataFrame(columns=["fact", "category", "source"])
-     sem_df.to_csv("semantic_memory.csv", index=False)
-     epi_df = pd.DataFrame(columns=["event", "sentiment", "date"])
-     epi_df.to_csv("episodic_memory.csv", index=False)
-
- def load_data():
-     try:
-         sem_df = pd.read_csv("semantic_memory.csv")
-         sem_mem = sem_df.to_dict("records")
-     except Exception:
-         create_empty_csv_files()
-         sem_mem = [{"fact": "The Earth is round", "category": "science", "source": "NASA"},
-                    {"fact": "Pizza is delicious", "category": "food", "source": "me"}]
-     try:
-         epi_df = pd.read_csv("episodic_memory.csv")
-         epi_mem = epi_df.to_dict("records")
-     except Exception:
-         create_empty_csv_files()
-         epi_mem = [{"event": "I went to the beach", "sentiment": "happy", "date": "2022-02-28"},
-                    {"event": "I had a fight with my friend", "sentiment": "sad", "date": "2022-02-25"}]
-     return sem_mem, epi_mem
-
- def save_data(sem_mem, epi_mem):
-     sem_df = pd.DataFrame(sem_mem)
-     sem_df.to_csv("semantic_memory.csv", index=False)
-     epi_df = pd.DataFrame(epi_mem)
-     epi_df.to_csv("episodic_memory.csv", index=False)
-
- def view_semantic_memory(sem_mem):
-     st.write("# Semantic Memory")
-     for item in sem_mem:
-         st.write(f"**{item['fact']}** ({item['category']}) - {item['source']}")
-
- def view_episodic_memory(epi_mem):
-     st.write("# Episodic Memory")
-     for item in epi_mem:
-         st.write(f"**{item['event']}** ({item['sentiment']}) - {item['date']}")
-
- def add_fact(sem_mem, fact, category, source):
-     sem_mem.append({"fact": fact, "category": category, "source": source})
-
- def add_event(epi_mem, event, sentiment, date):
-     epi_mem.append({"event": event, "sentiment": sentiment, "date": date})
-
- def add_fact_to_semantic_memory(sem_mem, epi_mem):
-     fact = st.text_input("Enter a fact")
-     category = st.text_input("Enter a category")
-     source = st.text_input("Enter a source")
-     if st.button("Add Fact"):
-         add_fact(sem_mem, fact, category, source)
-         save_data(sem_mem, epi_mem)
-         st.success("Fact added to semantic memory!")
-         st.sidebar.success("Fact added to semantic memory!")
-
- def add_event_to_episodic_memory(epi_mem, sem_mem):
-     event = st.text_input("Enter an event")
-     sentiment = st.selectbox("Select a sentiment", ["happy", "sad", "neutral"])
-     date = st.date_input("Select a date")
-     if st.button("Add Event"):
-         add_event(epi_mem, event, sentiment, date)
-         save_data(sem_mem, epi_mem)
-         st.success("Event added to episodic memory!")
-         st.sidebar.success("Event added to episodic memory!")
-
- def run_app():
-     sem_mem, epi_mem = load_data()
-
-     st.title("Cognitive Agent")
-     option = st.sidebar.selectbox("Select an option",
-                                   ["View Semantic Memory", "View Episodic Memory", "Add Fact to Semantic Memory",
-                                    "Add Event to Episodic Memory"])
-
-     if option == "View Semantic Memory":
-         view_semantic_memory(sem_mem)
-     elif option == "View Episodic Memory":
-         view_episodic_memory(epi_mem)
-     elif option == "Add Fact to Semantic Memory":
-         add_fact_to_semantic_memory(sem_mem, epi_mem)
-     elif option == "Add Event to Episodic Memory":
-         add_event_to_episodic_memory(epi_mem, sem_mem)
-
-     save_data(sem_mem, epi_mem)
-
- if __name__ == '__main__':
-     run_app()
-
-
- # AW: Restructure the code listing into four functions. shorten the code by eliminating comments and unnecessary whitespace and empty lines.
- # AI: This revised code splits the app into four functions: load_data, save_data, add_fact, and add_event. The run_app function handles the logic of the Streamlit app and calls these other functions as necessary. The code has been shortened by removing unnecessary whitespace and comments, but retains its functionality.
-
-
- # This program uses Streamlit to create a web app that allows the user to view and add to both semantic and episodic memory. The semantic memory is stored as a list of dictionaries, where each dictionary represents a fact and includes the fact itself, the category it belongs to, and the source of the fact. The episodic memory is also stored as a list of dictionaries, where each dictionary represents an event and includes the event itself, the sentiment associated with the event, and the date the event occurred.
- # The program allows the user to view both types of memory by selecting an option from the sidebar. If the user selects "View Semantic Memory", the program displays all of the facts stored in semantic memory. If the user selects "View Episodic Memory", the program displays all of the events stored in episodic memory.
- # The program also allows the user to add new facts to semantic memory or new events to episodic memory by selecting an option from the sidebar and filling out a form with the relevant information. When the user clicks the "Add Fact" or "Add Event" button, the new fact or event is added to the appropriate list of dictionaries and saved to a CSV file. The program then displays a success message indicating that the fact or event was added to memory.
- # Overall, this program demonstrates how semantic and episodic memory can be modeled using Python list dictionaries, and how these types of memory can be used to track both facts and observations, as well as sentiments associated with past experiences.
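As a minimal sketch of the list-of-dicts to CSV round-trip described in the comments above (the file name and record are placeholders):

    import pandas as pd

    records = [{"fact": "Water boils at 100 C", "category": "science", "source": "textbook"}]
    pd.DataFrame(records).to_csv("demo_memory.csv", index=False)    # save
    restored = pd.read_csv("demo_memory.csv").to_dict("records")    # load
    assert restored == records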
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py DELETED
@@ -1,17 +0,0 @@
- _base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py'
-
- deepen_factor = 0.33
- widen_factor = 0.25
-
- load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco/yolov5_n-v61_syncbn_fast_8xb16-300e_coco_20220919_090739-b804c1ad.pth'  # noqa
-
- model = dict(
-     backbone=dict(
-         deepen_factor=deepen_factor,
-         widen_factor=widen_factor,
-     ),
-     neck=dict(
-         deepen_factor=deepen_factor,
-         widen_factor=widen_factor,
-     ),
-     bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
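A short sketch of how such a config could be inspected programmatically. This assumes an mmengine-based environment, as mmyolo uses, and a path relative to this config's directory:

    from mmengine.config import Config

    cfg = Config.fromfile('yolov5_n-v61_fast_1xb64-50e_voc.py')
    print(cfg.model.backbone.widen_factor)  # 0.25, merged with values inherited from _base_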
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb64-150e_deepfashion2_vest_dress_256x192/__init__.py DELETED
File without changes
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/seq2seq.py DELETED
@@ -1,277 +0,0 @@
- from typing import List, Optional
-
- import torch
- from torch import nn
- from torch.autograd import Variable
-
- from poetry_diacritizer.modules.attention import AttentionWrapper
- from poetry_diacritizer.modules.layers import ConvNorm
- from poetry_diacritizer.modules.tacotron_modules import CBHG, Prenet
- from poetry_diacritizer.options import AttentionType
- from poetry_diacritizer.util.utils import get_mask_from_lengths
-
-
- class Seq2Seq(nn.Module):
-     def __init__(self, encoder: nn.Module, decoder: nn.Module):
-         super().__init__()
-         # Trying smaller std
-         self.encoder = encoder
-         self.decoder = decoder
-
-     def forward(
-         self,
-         src: torch.Tensor,
-         lengths: torch.Tensor,
-         target: Optional[torch.Tensor] = None,
-     ):
-         encoder_outputs = self.encoder(src, lengths)
-         mask = get_mask_from_lengths(encoder_outputs, lengths)
-         outputs, alignments = self.decoder(encoder_outputs, target, mask)
-
-         output = {"diacritics": outputs, "attention": alignments}
-
-         return output
-
-
- class Encoder(nn.Module):
-     def __init__(
-         self,
-         inp_vocab_size: int,
-         embedding_dim: int = 512,
-         layers_units: List[int] = [256, 256, 256],
-         use_batch_norm: bool = False,
-     ):
-         super().__init__()
-         self.embedding = nn.Embedding(inp_vocab_size, embedding_dim)
-
-         layers_units = [embedding_dim // 2] + layers_units
-
-         layers = []
-
-         for i in range(1, len(layers_units)):
-             layers.append(
-                 nn.LSTM(
-                     layers_units[i - 1] * 2,
-                     layers_units[i],
-                     bidirectional=True,
-                     batch_first=True,
-                 )
-             )
-             if use_batch_norm:
-                 layers.append(nn.BatchNorm1d(layers_units[i] * 2))
-
-         self.layers = nn.ModuleList(layers)
-         self.layers_units = layers_units
-         self.use_batch_norm = use_batch_norm
-
-     def forward(self, inputs: torch.Tensor, inputs_lengths: torch.Tensor):
-         outputs = self.embedding(inputs)
-
-         # embedded_inputs = [batch_size, src_len, embedding_dim]
-
-         for i, layer in enumerate(self.layers):
-             if isinstance(layer, nn.BatchNorm1d):
-                 outputs = layer(outputs.permute(0, 2, 1))
-                 outputs = outputs.permute(0, 2, 1)
-                 continue
-             if i > 0:
-                 outputs, (hn, cn) = layer(outputs, (hn, cn))
-             else:
-                 outputs, (hn, cn) = layer(outputs)
-
-         return outputs
-
-
- class Decoder(nn.Module):
-     """A seq2seq decoder that decodes one diacritic at a time.
-
-     Args:
-         encoder_dim (int): the encoder output dim
-         decoder_units (int): the number of neurons for each decoder layer
-         decoder_layers (int): number of decoder layers
-     """
-
-     def __init__(
-         self,
-         trg_vocab_size: int,
-         start_symbol_id: int,
-         encoder_dim: int = 256,
-         embedding_dim: int = 256,
-         decoder_units: int = 256,
-         decoder_layers: int = 2,
-         attention_units: int = 256,
-         attention_type: AttentionType = AttentionType.LocationSensitive,
-         is_attention_accumulative: bool = False,
-         prenet_depth: List[int] = [256, 128],
-         use_prenet: bool = True,
-         teacher_forcing_probability: float = 0.0,
-     ):
-         super().__init__()
-
-         self.output_dim: int = trg_vocab_size
-         self.start_symbol_id = start_symbol_id
-         self.attention_units = attention_units
-         self.decoder_units = decoder_units
-         self.encoder_dim = encoder_dim
-         self.use_prenet = use_prenet
-         self.teacher_forcing_probability = teacher_forcing_probability
-         self.is_attention_accumulative = is_attention_accumulative
-         self.embedding = nn.Embedding(trg_vocab_size, embedding_dim, padding_idx=0)
-         attention_in = embedding_dim
-         if use_prenet:
-             self.prenet = Prenet(embedding_dim, prenet_depth)
-             attention_in = prenet_depth[-1]
-
-         self.attention_layer = nn.GRUCell(encoder_dim + attention_in, attention_units)
-         self.attention_wrapper = AttentionWrapper(attention_type, attention_units)
-         self.keys_layer = nn.Linear(encoder_dim, attention_units, bias=False)
-         self.project_to_decoder_in = nn.Linear(
-             attention_units + encoder_dim,
-             decoder_units,
-         )
-
-         self.decoder_rnns = nn.ModuleList(
-             [nn.GRUCell(decoder_units, decoder_units) for _ in range(decoder_layers)]
-         )
-
-         self.diacritics_layer = nn.Linear(decoder_units, trg_vocab_size)
-         self.device = "cuda"
-
-     def decode(
-         self,
-         diacritic: torch.Tensor,
-     ):
-         """Decode one time-step.
-
-         Args:
-             diacritic (Tensor): (batch_size, 1)
-         """
-         diacritic = self.embedding(diacritic)
-         if self.use_prenet:
-             prenet_out = self.prenet(diacritic)
-         else:
-             prenet_out = diacritic
-
-         cell_input = torch.cat((prenet_out, self.prev_attention), -1)
-
-         self.attention_hidden = self.attention_layer(cell_input, self.attention_hidden)
-         output = self.attention_hidden
-
-         # The queries are the hidden state of the RNN layer
-         attention, alignment = self.attention_wrapper(
-             query=self.attention_hidden,
-             values=self.encoder_outputs,
-             keys=self.keys,
-             mask=self.mask,
-             prev_alignment=self.prev_alignment,
-         )
-
-         decoder_input = torch.cat((output, attention), -1)
-
-         decoder_input = self.project_to_decoder_in(decoder_input)
-
-         for idx in range(len(self.decoder_rnns)):
-             self.decoder_hiddens[idx] = self.decoder_rnns[idx](
-                 decoder_input, self.decoder_hiddens[idx]
-             )
-             decoder_input = self.decoder_hiddens[idx] + decoder_input
-
-         output = decoder_input
-
-         output = self.diacritics_layer(output)
-
-         if self.is_attention_accumulative:
-             self.prev_alignment = self.prev_alignment + alignment
-         else:
-             self.prev_alignment = alignment
-
-         self.prev_attention = attention
-
-         return output, alignment
-
-     def inference(self):
-         """Generate diacritics one at a time."""
-         batch_size = self.encoder_outputs.size(0)
-         trg_len = self.encoder_outputs.size(1)
-         diacritic = (
-             torch.full((batch_size,), self.start_symbol_id).to(self.device).long()
-         )
-         outputs, alignments = [], []
-         self.initialize()
-
-         for _ in range(trg_len):
-             output, alignment = self.decode(diacritic=diacritic)
-
-             outputs.append(output)
-             alignments.append(alignment)
-             diacritic = torch.max(output, 1).indices
-
-         alignments = torch.stack(alignments).transpose(0, 1)
-         outputs = torch.stack(outputs).transpose(0, 1).contiguous()
-         return outputs, alignments
-
-     def forward(
-         self,
-         encoder_outputs: torch.Tensor,
-         diacritics: Optional[torch.Tensor] = None,
-         input_mask: Optional[torch.Tensor] = None,
-     ):
-         """Calculate forward propagation.
-
-         Args:
-             encoder_outputs (Tensor): the output of the encoder
-                 (batch_size, Tx, encoder_units * 2)
-             diacritics (Tensor): target sequence
-             input_mask (Tensor): the inputs mask (batch_size, Tx)
-         """
-         self.mask = input_mask
-         self.encoder_outputs = encoder_outputs
-         self.keys = self.keys_layer(encoder_outputs)
-
-         if diacritics is None:
-             return self.inference()
-
-         batch_size = diacritics.size(0)
-         trg_len = diacritics.size(1)
-
-         # Init decoder states
-         outputs = []
-         alignments = []
-
-         self.initialize()
-
-         diacritic = (
-             torch.full((batch_size,), self.start_symbol_id).to(self.device).long()
-         )
-
-         for time in range(trg_len):
-             output, alignment = self.decode(diacritic=diacritic)
-             outputs += [output]
-             alignments += [alignment]
-             # if random.random() > self.teacher_forcing_probability:
-             diacritic = diacritics[:, time]  # use training input
-             # else:
-             #     diacritic = torch.max(output, 1).indices  # use last output
-
-         alignments = torch.stack(alignments).transpose(0, 1)
-         outputs = torch.stack(outputs).transpose(0, 1).contiguous()
-
-         return outputs, alignments
-
-     def initialize(self):
-         """Initialize the first step variables."""
-         batch_size = self.encoder_outputs.size(0)
-         src_len = self.encoder_outputs.size(1)
-         self.attention_hidden = Variable(
-             torch.zeros(batch_size, self.attention_units)
-         ).to(self.device)
-         self.decoder_hiddens = [
-             Variable(torch.zeros(batch_size, self.decoder_units)).to(self.device)
-             for _ in range(len(self.decoder_rnns))
-         ]
-         self.prev_attention = Variable(torch.zeros(batch_size, self.encoder_dim)).to(
-             self.device
-         )
-         self.prev_alignment = Variable(torch.zeros(batch_size, src_len)).to(self.device)
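A minimal instantiation sketch under assumed hyper-parameters: the vocab sizes are placeholders, `encoder_dim=512` matches the bidirectional encoder output (2 x 256), and note that `Decoder` hard-codes `device = "cuda"`:

    encoder = Encoder(inp_vocab_size=100, embedding_dim=512)  # output dim: 2 * 256 = 512
    decoder = Decoder(trg_vocab_size=20, start_symbol_id=1, encoder_dim=512)
    model = Seq2Seq(encoder, decoder)
    # forward(src, lengths, target) returns {"diacritics": ..., "attention": ...}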
spaces/Abhilashvj/planogram-compliance/utils/loggers/wandb/log_dataset.py DELETED
@@ -1,43 +0,0 @@
- import argparse
-
- from wandb_utils import WandbLogger
-
- from utils.general import LOGGER
-
- WANDB_ARTIFACT_PREFIX = "wandb-artifact://"
-
-
- def create_dataset_artifact(opt):
-     logger = WandbLogger(
-         opt, None, job_type="Dataset Creation"
-     )  # TODO: return value unused
-     if not logger.wandb:
-         LOGGER.info(
-             "install wandb using `pip install wandb` to log the dataset"
-         )
-
-
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-     parser.add_argument(
-         "--data", type=str, default="data/coco128.yaml", help="data.yaml path"
-     )
-     parser.add_argument(
-         "--single-cls",
-         action="store_true",
-         help="train as single-class dataset",
-     )
-     parser.add_argument(
-         "--project", type=str, default="YOLOv5", help="name of W&B Project"
-     )
-     parser.add_argument("--entity", default=None, help="W&B entity")
-     parser.add_argument(
-         "--name", type=str, default="log dataset", help="name of W&B run"
-     )
-
-     opt = parser.parse_args()
-     opt.resume = (
-         False  # Explicitly disallow resume check for dataset upload job
-     )
-
-     create_dataset_artifact(opt)
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/__init__.py DELETED
@@ -1 +0,0 @@
- # -*- coding: utf-8 -*-
spaces/Adithedev/Keyword-Extractor/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Keyword Extractor
- emoji: 🌍
- colorFrom: green
- colorTo: gray
- sdk: streamlit
- sdk_version: 1.25.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateBBCodeText.js DELETED
@@ -1,16 +0,0 @@
- import BBCodeText from '../../bbcodetext/BBCodeText.js';
- import MergeStyle from './utils/MergeStyle.js';
- import SetTextureProperties from './utils/SetTextureProperties.js';
-
- var CreateBBCodeText = function (scene, data, view, styles, customBuilders) {
-     data = MergeStyle(data, styles);
-
-     var gameObject = new BBCodeText(scene, 0, 0, data.text, data);
-
-     SetTextureProperties(gameObject, data);
-
-     scene.add.existing(gameObject);
-     return gameObject;
- };
-
- export default CreateBBCodeText;
spaces/AlexWang/lama/saicinpainting/evaluation/masks/README.md DELETED
@@ -1,27 +0,0 @@
- # Current algorithm
-
- ## Choice of mask objects
-
- To identify objects suitable for mask generation, we use a panoptic segmentation model
- from [detectron2](https://github.com/facebookresearch/detectron2) trained on COCO. Categories of the detected instances
- belong either to "stuff" or "things" types. We consider that instances of objects should have a category belonging
- to "things". Besides, we set an upper bound on the area taken up by the object &mdash; we consider that too big
- an area indicates either that the instance is a background or a main object which should not be removed.
-
- ## Choice of position for mask
-
- We consider that the input image has size 2^n x 2^m. We downsample it using the
- [COUNTLESS](https://github.com/william-silversmith/countless) algorithm so the width is equal to
- 64 = 2^6 = 2^{downsample_levels}.
-
- ### Augmentation
-
- There are several parameters for augmentation:
- - Scaling factor. We limit scaling to the case when a mask after scaling with pivot point in its center fits inside the
-   image completely.
- -
-
- ### Shift
-
-
- ## Select
spaces/AlexWang/lama/saicinpainting/evaluation/masks/countless/__init__.py DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/grid_sample_gradfix.py DELETED
@@ -1,84 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- """Custom replacement for `torch.nn.functional.grid_sample` that
- supports arbitrarily high order gradients between the input and output.
- Only works on 2D images and assumes
- `mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
-
- import torch
-
- # pylint: disable=redefined-builtin
- # pylint: disable=arguments-differ
- # pylint: disable=protected-access
-
- # ----------------------------------------------------------------------------
-
- enabled = False  # Enable the custom op by setting this to true.
-
- # ----------------------------------------------------------------------------
-
-
- def grid_sample(input, grid):
-     if _should_use_custom_op():
-         return _GridSample2dForward.apply(input, grid)
-     return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
-
- # ----------------------------------------------------------------------------
-
-
- def _should_use_custom_op():
-     return enabled
-
- # ----------------------------------------------------------------------------
-
-
- class _GridSample2dForward(torch.autograd.Function):
-     @staticmethod
-     def forward(ctx, input, grid):
-         assert input.ndim == 4
-         assert grid.ndim == 4
-         output = torch.nn.functional.grid_sample(
-             input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
-         ctx.save_for_backward(input, grid)
-         return output
-
-     @staticmethod
-     def backward(ctx, grad_output):
-         input, grid = ctx.saved_tensors
-         grad_input, grad_grid = _GridSample2dBackward.apply(
-             grad_output, input, grid)
-         return grad_input, grad_grid
-
- # ----------------------------------------------------------------------------
-
-
- class _GridSample2dBackward(torch.autograd.Function):
-     @staticmethod
-     def forward(ctx, grad_output, input, grid):
-         op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
-         grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
-         ctx.save_for_backward(grid)
-         return grad_input, grad_grid
-
-     @staticmethod
-     def backward(ctx, grad2_grad_input, grad2_grad_grid):
-         _ = grad2_grad_grid  # unused
-         grid, = ctx.saved_tensors
-         grad2_grad_output = None
-         grad2_input = None
-         grad2_grid = None
-
-         if ctx.needs_input_grad[0]:
-             grad2_grad_output = _GridSample2dForward.apply(
-                 grad2_grad_input, grid)
-
-         assert not ctx.needs_input_grad[2]
-         return grad2_grad_output, grad2_input, grad2_grid
-
- # ----------------------------------------------------------------------------
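A hedged usage sketch: the import path assumes the repo layout above, the tensors are random placeholders, and `enabled` is switched on explicitly since it is off by default. Second-order gradients flowing through the gradient of the input (as in R1-style penalties) are the case this module supports.

    import torch
    from torch_utils.ops import grid_sample_gradfix

    grid_sample_gradfix.enabled = True  # opt in to the custom op
    image = torch.randn(1, 3, 16, 16, requires_grad=True)
    grid = torch.rand(1, 16, 16, 2) * 2 - 1  # normalized sampling coords in [-1, 1]

    out = grid_sample_gradfix.grid_sample(image, grid)
    loss = out.pow(2).sum()
    # First-order gradient, keeping the graph so it can be differentiated again:
    (g,) = torch.autograd.grad(loss, image, create_graph=True)
    g.pow(2).sum().backward()  # backward through the backward pass
    print(image.grad.shape)    # torch.Size([1, 3, 16, 16])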
spaces/Andy1621/uniformer_image_detection/mmdet/models/losses/kd_loss.py DELETED
@@ -1,87 +0,0 @@
- import mmcv
- import torch.nn as nn
- import torch.nn.functional as F
-
- from ..builder import LOSSES
- from .utils import weighted_loss
-
-
- @mmcv.jit(derivate=True, coderize=True)
- @weighted_loss
- def knowledge_distillation_kl_div_loss(pred,
-                                        soft_label,
-                                        T,
-                                        detach_target=True):
-     r"""Loss function for knowledge distilling using KL divergence.
-
-     Args:
-         pred (Tensor): Predicted logits with shape (N, n + 1).
-         soft_label (Tensor): Target logits with shape (N, n + 1).
-         T (int): Temperature for distillation.
-         detach_target (bool): Remove soft_label from automatic differentiation.
-
-     Returns:
-         torch.Tensor: Loss tensor with shape (N,).
-     """
-     assert pred.size() == soft_label.size()
-     target = F.softmax(soft_label / T, dim=1)
-     if detach_target:
-         target = target.detach()
-
-     kd_loss = F.kl_div(
-         F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
-             T * T)
-
-     return kd_loss
-
-
- @LOSSES.register_module()
- class KnowledgeDistillationKLDivLoss(nn.Module):
-     """Loss function for knowledge distilling using KL divergence.
-
-     Args:
-         reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
-         loss_weight (float): Loss weight of current loss.
-         T (int): Temperature for distillation.
-     """
-
-     def __init__(self, reduction='mean', loss_weight=1.0, T=10):
-         super(KnowledgeDistillationKLDivLoss, self).__init__()
-         assert T >= 1
-         self.reduction = reduction
-         self.loss_weight = loss_weight
-         self.T = T
-
-     def forward(self,
-                 pred,
-                 soft_label,
-                 weight=None,
-                 avg_factor=None,
-                 reduction_override=None):
-         """Forward function.
-
-         Args:
-             pred (Tensor): Predicted logits with shape (N, n + 1).
-             soft_label (Tensor): Target logits with shape (N, n + 1).
-             weight (torch.Tensor, optional): The weight of loss for each
-                 prediction. Defaults to None.
-             avg_factor (int, optional): Average factor that is used to average
-                 the loss. Defaults to None.
-             reduction_override (str, optional): The reduction method used to
-                 override the original reduction method of the loss.
-                 Defaults to None.
-         """
-         assert reduction_override in (None, 'none', 'mean', 'sum')
-
-         reduction = (
-             reduction_override if reduction_override else self.reduction)
-
-         loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
-             pred,
-             soft_label,
-             weight,
-             reduction=reduction,
-             avg_factor=avg_factor,
-             T=self.T)
-
-         return loss_kd
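A minimal usage sketch with random logits; the batch size and class count are made up, and the shapes follow the docstring:

    import torch

    loss_fn = KnowledgeDistillationKLDivLoss(reduction='mean', loss_weight=1.0, T=10)
    student_logits = torch.randn(4, 81, requires_grad=True)  # (N, n + 1)
    teacher_logits = torch.randn(4, 81)                      # (N, n + 1), detached inside the loss
    loss = loss_fn(student_logits, teacher_logits)
    loss.backward()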
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './nonlocal_r50-d8_769x769_40k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/ops/__init__.py DELETED
@@ -1,4 +0,0 @@
- from .encoding import Encoding
- from .wrappers import Upsample, resize
-
- __all__ = ['Upsample', 'resize', 'Encoding']
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/inject_securetransport.py DELETED
@@ -1,35 +0,0 @@
- """A helper module that injects SecureTransport, on import.
-
- The import should be done as early as possible, to ensure all requests and
- sessions (or whatever) are created after injecting SecureTransport.
-
- Note that we only do the injection on macOS, when the linked OpenSSL is too
- old to handle TLSv1.2.
- """
-
- import sys
-
-
- def inject_securetransport() -> None:
-     # Only relevant on macOS
-     if sys.platform != "darwin":
-         return
-
-     try:
-         import ssl
-     except ImportError:
-         return
-
-     # Inject only if the linked OpenSSL is older than 1.0.1
-     if ssl.OPENSSL_VERSION_NUMBER >= 0x1000100F:
-         return
-
-     try:
-         from pip._vendor.urllib3.contrib import securetransport
-     except (ImportError, OSError):
-         return
-
-     securetransport.inject_into_urllib3()
-
-
- inject_securetransport()
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/__init__.py DELETED
@@ -1,23 +0,0 @@
- # -*- coding: utf-8 -*-
- #
- # Copyright (C) 2012-2022 Vinay Sajip.
- # Licensed to the Python Software Foundation under a contributor agreement.
- # See LICENSE.txt and CONTRIBUTORS.txt.
- #
- import logging
-
- __version__ = '0.3.6'
-
- class DistlibException(Exception):
-     pass
-
- try:
-     from logging import NullHandler
- except ImportError:  # pragma: no cover
-     class NullHandler(logging.Handler):
-         def handle(self, record): pass
-         def emit(self, record): pass
-         def createLock(self): self.lock = None
-
- logger = logging.getLogger(__name__)
- logger.addHandler(NullHandler())
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/importlib_resources/simple.py DELETED
@@ -1,116 +0,0 @@
- """
- Interface adapters for low-level readers.
- """
-
- import abc
- import io
- import itertools
- from typing import BinaryIO, List
-
- from .abc import Traversable, TraversableResources
-
-
- class SimpleReader(abc.ABC):
-     """
-     The minimum, low-level interface required from a resource
-     provider.
-     """
-
-     @abc.abstractproperty
-     def package(self):
-         # type: () -> str
-         """
-         The name of the package for which this reader loads resources.
-         """
-
-     @abc.abstractmethod
-     def children(self):
-         # type: () -> List['SimpleReader']
-         """
-         Obtain an iterable of SimpleReader for available
-         child containers (e.g. directories).
-         """
-
-     @abc.abstractmethod
-     def resources(self):
-         # type: () -> List[str]
-         """
-         Obtain available named resources for this virtual package.
-         """
-
-     @abc.abstractmethod
-     def open_binary(self, resource):
-         # type: (str) -> BinaryIO
-         """
-         Obtain a File-like for a named resource.
-         """
-
-     @property
-     def name(self):
-         return self.package.split('.')[-1]
-
-
- class ResourceHandle(Traversable):
-     """
-     Handle to a named resource in a ResourceReader.
-     """
-
-     def __init__(self, parent, name):
-         # type: (ResourceContainer, str) -> None
-         self.parent = parent
-         self.name = name  # type: ignore
-
-     def is_file(self):
-         return True
-
-     def is_dir(self):
-         return False
-
-     def open(self, mode='r', *args, **kwargs):
-         stream = self.parent.reader.open_binary(self.name)
-         if 'b' not in mode:
-             # Wrap the binary stream for text-mode access (the original
-             # dropped `stream` here, which was a bug).
-             stream = io.TextIOWrapper(stream, *args, **kwargs)
-         return stream
-
-     def joinpath(self, name):
-         raise RuntimeError("Cannot traverse into a resource")
-
-
- class ResourceContainer(Traversable):
-     """
-     Traversable container for a package's resources via its reader.
-     """
-
-     def __init__(self, reader):
-         # type: (SimpleReader) -> None
-         self.reader = reader
-
-     def is_dir(self):
-         return True
-
-     def is_file(self):
-         return False
-
-     def iterdir(self):
-         files = (ResourceHandle(self, name) for name in self.reader.resources)
-         dirs = map(ResourceContainer, self.reader.children())
-         return itertools.chain(files, dirs)
-
-     def open(self, *args, **kwargs):
-         raise IsADirectoryError()
-
-     def joinpath(self, name):
-         return next(
-             traversable for traversable in self.iterdir() if traversable.name == name
-         )
-
-
- class TraversableReader(TraversableResources, SimpleReader):
-     """
-     A TraversableResources based on SimpleReader. Resource providers
-     may derive from this class to provide the TraversableResources
-     interface by supplying the SimpleReader interface.
-     """
-
-     def files(self):
-         return ResourceContainer(self)
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/__init__.py DELETED
@@ -1,24 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- from .compat import downgrade_config, upgrade_config
- from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable
- from .instantiate import instantiate
- from .lazy import LazyCall, LazyConfig
-
- __all__ = [
-     "CfgNode",
-     "get_cfg",
-     "global_cfg",
-     "set_global_cfg",
-     "downgrade_config",
-     "upgrade_config",
-     "configurable",
-     "instantiate",
-     "LazyCall",
-     "LazyConfig",
- ]
-
-
- from detectron2.utils.env import fixup_module_metadata
-
- fixup_module_metadata(__name__, globals(), __all__)
- del fixup_module_metadata
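A short usage sketch of the yacs-style exports above; the device override is only an illustrative assumption:

    from detectron2.config import get_cfg

    cfg = get_cfg()           # a fresh CfgNode populated with detectron2 defaults
    cfg.MODEL.DEVICE = "cpu"  # CfgNode supports attribute-style access
    cfg.freeze()              # make the config immutable before use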
spaces/Bajr/softly/Dockerfile DELETED
@@ -1,15 +0,0 @@
- FROM node:20-bullseye-slim
- RUN apt-get update && \
-     apt-get install -y git
- EXPOSE 7860
- WORKDIR /home/node
- RUN mkdir app && chown node:node app
- USER node
- WORKDIR /home/node/app
- RUN mkdir source_code
- COPY Dockerfile greeting.md* .env* ./
- COPY run.sh ./
- USER root
- RUN chmod +x ./run.sh
- USER node
- CMD ["/bin/bash", "./run.sh"]
spaces/Benson/text-generation/Examples/Coin Master Unlimited Free Spins 2022 Apk.md DELETED
@@ -1,86 +0,0 @@
1
- <br />
2
- <h1>Coin Master unlimited free spins 2022 apk: how to get more spins and coins</h1>
3
- <p>Coin Master is one of the most popular and addictive mobile games, combining strategy, chance and fun. In this game, your goal is to build your own Viking village, attack and loot other players' villages, and protect your territory from invaders. To do all this, you need to spin a slot machine that will give you different rewards such as coins, shields, hammers, or free spins. </p>
4
- <h2>coin master unlimited free spins 2022 apk</h2><br /><p><b><b>Download</b> &#10002; &#10002; &#10002; <a href="https://bltlly.com/2v6LV2">https://bltlly.com/2v6LV2</a></b></p><br /><br />
5
- <p>But the free spins are not infinite, and they sell out quickly. That’s why many players look for ways to get more spins and coins without having to pay or wait. One of the most tempting options is to use the Coin Master Unlimited Free Spins 2022 apk, a modified app that allows you to get infinite spins in the game. But what exactly is this apk, how does it work, and what advantages and disadvantages does it have? In this article we tell you everything. </p>
6
- <h2>What is Coin Master and why is it so popular? </h2>
7
- <p>Before going into details about the Coin Master 2022 unlimited free spins apk, let’s briefly explain what Coin Master is and why it has become one of the most downloaded and played games in the world. </p>
8
- <h3>A strategy and chance game that engages you</h3>
9
- <p>Coin Master is a free game for Android and iOS that was launched in 2010 by the Israeli company Moon Active. Since then, it has accumulated over 100 million downloads and generated over $500 million in revenue. The game is based on a simple but very addictive concept: spin a slot machine to get different rewards to help you build your Viking village. </p>
10
-
11
- <h3>How to play Coin Master and build your viking village</h3>
12
- <p>The game is very easy to play, but also requires some strategy and planning. At first, you get 75 free spins to start spinning the slot machine. Every time you spin the machine, you spend one spin and get one of the following rewards:</p>
13
- <p></p>
14
- <ul>
15
- <li>Coins: they are used to buy and improve the buildings of your village. The more advanced the level of your village, the more coins you will need to build and upgrade. </li>
16
- <li>Hammers: allow you to attack other players' villages and steal some of their coins. You can choose which player to attack or let the game do it for you. Each attack gives you the option to hit one of the target player’s four buildings. If you manage to destroy all four buildings, you get an extra bonus. </li>
17
- <li>Shields: protect you from attacks by other players. Each shield blocks an attack, but you can only have a maximum of three shields at a time. If you already have three shields and get another one, it becomes coins. </li>
18
- <li>Pigs: they give you the opportunity to loot the village of the player who occupies the first place in the ranking of the game. You can choose from four places to dig and find a hidden treasure. The treasure can contain coins, free spins, or a surprise bag. </li>
19
- <li>Free spins: they give you more spins to keep playing. You can get between two and ten free spins every time they appear on the machine. </li>
20
- </ul>
21
- <p>When you run out of free spins, you can wait for them to recharge every hour, buy more with real money, or get more with other methods that we will see later. </p>
22
- <h2>How to get unlimited free spins in Coin Master? </h2>
23
-
24
- <h3>Legal and safe methods to get more spins</h3>
25
- <p>These are the methods that the game itself offers you to get more free spins without violating its rules or endangering your account or your device. They are as follows:</p>
26
- <h4>Wait for hourly re-runs</h4>
27
- <p>This is the most basic and simple method, but also the slowest. The game gives you five free spins per hour, up to a maximum of 50 free spins per day. If you don’t want to spend money or risk using other methods, you can simply wait for hourly spins to reload and play sparingly. </p>
28
- <h4>Invite your Facebook friends to play</h4>
29
- <p>This is one of the most effective and easy methods to get more free spins. The game rewards you with 40 free spins for each friend you invite to play Coin Master via Facebook. Just send them an invitation from the game and wait for them to accept it and install the game on their devices. Also, by connecting your Facebook account with the game, you can see your friends' villages and compete with them. </p>
30
- <h4>Follow the official social networks of Coin Master</h4>
31
- <p>Another way to get more free spins is to follow the official social networks of Coin Master, such as Facebook, Twitter, Instagram, or YouTube. In these networks, the game usually publishes links and codes that give you free spins or coins if you claim them on time. You can also participate in sweepstakes, contests, and surveys that the game periodically performs to reward its followers with free spins. </p>
32
- <h4>View ads in exchange for extra spins</h4>
33
-
34
- <h4>Participate in special events and missions</h4>
35
- <p>Coin Master usually organizes special events and missions that give you the opportunity to get more free spins and other rewards. These events and missions can vary in duration, difficulty, and theme, but they always offer you an incentive to play more and get more profits. For example, there may be seasonal events, such as Christmas or Halloween, or themed events, such as the Lion King or the Vikings. To participate in these events and missions, simply enter the game and follow the instructions given. </p>
36
- <h4>Complete letter collections</h4>
37
- <p>Another way to get more free spins is to complete the card collections that the game offers you. Cards are collectibles that you can get by turning the machine or opening chests. There are different types of cards, each with its own value and rarity. By completing a card collection, you get a reward that can include free spins, coins, or pets. Pets are creatures that accompany you in the game and give you different advantages, such as multiplying your coins or your attacks. </p>
38
- <h3>Illegal and risky methods to get infinite spins</h3>
39
- <p>These are the methods that some players use to get infinite spins in Coin Master, but they are not legal or safe. These methods violate the rules of the game and can endanger your account or device. They are as follows:</p>
40
- <h4>Using online print generators</h4>
41
- <p>Some websites or apps promise you unlimited free spins for Coin Master if you enter your username or email. These generators often ask you to complete a survey or verify that you are human to give you the free spins. However, these generators do not really work and only seek to get your personal data or waste your time. </p>
42
-
43
- <p>Another option that some players choose is to download modified or hacked Coin Master apps that supposedly give you infinite spins in the game. These apps are altered versions of the original game that are downloaded from unofficial sources. However, these apps may contain viruses or malware that damage your device or steal your information. They can also be detected by the game and cause your account to be banned. </p>
44
- <h4>Using game tricks or glitches</h4>
45
- <p>Finally, some players try to take advantage of some tricks or glitches of the game to get more free spins. These tricks or glitches are errors or failures of the game that can be exploited to obtain advantages. For example, some players change the date and time of their device to trick the game and get more free spins. However, these tricks or glitches do not always work and can cause problems in the operation of the game or in synchronization with the server. </p>
46
- <h2>What are the advantages of using the Coin Master apk unlimited 2022 free spins? </h2>
47
- <p>Now that we know what methods exist to get more free spins in Coin Master, let’s focus on the Coin Master apk unlimited free spins 2022, a modified app that allows you to get infinite spins in the game. What are the advantages of using this apk? Here are some of the possible advantages:</p>
48
- <h3>Enjoy the game without limits or restrictions</h3>
49
-
50
- <h3>Save money and time in the game</h3>
51
- <p>Another advantage of using the Coin Master Unlimited 2022 free spins apk is that you can save money and time in the game. With infinite spins, you won’t have to spend real money buying more spins or coins, which can save you a lot in the long run. Plus, with infinite spins, you won’t have to waste time watching ads, completing surveys, or following social media for more spins. You can spend all your time playing and having fun. </p>
52
- <h3>Increase your chances of winning and advancing</h3>
53
- <p>Finally, another advantage of using the Coin Master apk unlimited free spins 2022 is that you can increase your chances of winning and advancing in the game. By having infinite spins, you can get more coins, shields, hammers, pigs, and cards to help you build your village, attack and loot other players' villages, and protect your territory from invaders. You can also participate in more events and special missions that give you more rewards and benefits. Thus, you can progress faster and easier in the game and reach the highest levels. </p>
54
- <h2>What are the disadvantages or risks of using the Coin Master apk unlimited 2022 free spins? </h2>
55
- <p>But not all are advantages when using the Coin Master apk unlimited free spins 2022. This app also has its disadvantages and risks, which you should consider before deciding whether to use it or not. Here are some of the possible disadvantages and risks:</p>
56
- <h3>Violating game terms and conditions of use</h3>
57
-
58
- <h3>Expose your account and your personal data to possible bans or thefts</h3>
59
- <p>Another disadvantage or risk of using the Coin Master Unlimited 2022 Free Spins apk is that you are exposing your account and your personal data to possible bans or thefts. By using this app, you are giving access to an unofficial source to your account and your personal information, such as your name, email address, or credit card. This can pose a risk to your security and privacy, as you may be the victim of a ban by the game or a theft by third parties. </p>
60
- <h3>Damaging your device with viruses or malware</h3>
61
- <p>Finally, another disadvantage or risk of using the Coin Master apk unlimited free spins 2022 is that you are damaging your device with viruses or malware. By downloading and installing this application from an unofficial source, you are exposing your device to possible viruses or malware that may infect or damage it. This can affect the performance or operation of your device, and even lead to loss or deletion of your personal data or files. </p>
62
- <h2>Conclusion: Is it worth using the Coin Master apk unlimited 2022 free spins? </h2>
63
- <p>After analyzing the advantages and disadvantages of using the Coin Master apk unlimited free spins 2022, we can conclude that this is a very tempting but also very risky option. While it is true that this application allows you to enjoy the game without limits or restrictions, it is also true that it exposes you to possible legal, security, and fun problems. </p>
64
- <p>Therefore, our recommendation is that you do not use the Coin Master apk unlimited 2022 free spins, and that you opt for legal and safe methods to get more free spins in the game. In this way, you will be able to play Coin Master responsibly, safely, and fun, without compromising your account, your device, or your gaming experience. </p>
65
- <h2>Frequently asked questions about the Coin Master apk unlimited free spins 2022</h2>
66
- <p>Below, we present some of the most frequently asked questions players have about the Coin Master apk unlimited free spins 2022, along with their respective answers. </p>
67
- <h3>Where can I download the Coin Master apk unlimited free spins 2022? </h3>
68
- <p>We cannot give you an exact answer to this question, as the Coin Master Unlimited Free Spins 2022 apk is not an official app licensed by the game. Therefore, you won’t find it in official app stores such as Google Play or the App Store. You will need to search for it on unofficial websites or forums, but be aware that these sources may not be reliable or secure. </p>
69
- <h3>How to install unlimited Coin Master Free Spins 2022 apk? </h3>
70
- <p>To install the Coin Master apk unlimited free spins 2022, you will need to follow a few steps similar to those followed to install any other application from an external source. Here are the general steps:</p>
71
- <ol>
72
- <li>Download the apk file from the source you have chosen. </li>
73
- <li>Go to your device settings and activate the option to allow the installation of applications from unknown sources. </li>
74
- <li>Find the apk file in your device’s download folder and open it. </li>
75
- <li>Follow the instructions on the screen to complete the installation. </li>
76
- <li>Open the app and enjoy unlimited free spins in Coin Master.</li>
77
- </ol>
78
- <p>Remember that these steps may vary according to the type and model of your device, and according to the version of the apk you have downloaded. </p>
79
- <h3>Is it safe to use Coin Master apk unlimited 2022 free spins? </h3>
80
- <p>No, it is not entirely safe to use the Coin Master apk unlimited 2022 free spins. As we have explained, this application comes from unofficial sources that may contain viruses or malware, and it exposes your account and your personal data to possible bans or thefts. If you decide to use it anyway, you do so at your own risk. </p>
81
- <h3>Is it legal to use Coin Master apk unlimited 2022 free spins? </h3>
82
- <p>No, it is not legal to use the Coin Master apk unlimited 2022 free spins. By using this application, you are violating the terms and conditions of use of the game, which expressly prohibit the use of any software or tool that alters or modifies the normal operation of the game or that confers an unfair advantage over other players. This is considered serious misconduct and can carry consequences ranging from account suspension to legal action. </p>
83
- <h3>Is it fun to use the Coin Master apk unlimited free spins 2022? </h3>
84
- <p>We cannot answer this question for you, as it depends on your personal tastes and preferences. But we can tell you that using the Coin Master apk unlimited free spins 2022 can make you lose the fun and challenge of the game. By having infinite spins, the game loses its essence and its charm, as there is no challenge or excitement when spinning the machine. Also, by using the apk of Coin Master unlimited free spins 2022, you are missing out on the interaction and competition with other players, which are part of the appeal of the game. That’s why we suggest you play Coin Master naturally and honestly, and enjoy the game as designed. </p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descarga De Archivos Zip Facebook Lite.md DELETED
@@ -1,102 +0,0 @@
1
-
2
- <h1>Facebook Lite Zip File Download: A Guide for Android Users</h1>
3
- <p>Facebook is one of the most popular social media platforms in the world, with billions of users. However, not everyone has access to a fast, stable Internet connection or a powerful smartphone with plenty of storage. That is why Facebook created Facebook Lite, a lighter, faster version of the app that works under all network conditions and on all Android devices. In this article, we will show you how to download the Facebook Lite zip file for Android and how to install it on your phone. </p>
4
- <h2>facebook lite zip file download</h2><br /><p><b><b>Download File</b> &rArr; <a href="https://bltlly.com/2v6Kse">https://bltlly.com/2v6Kse</a></b></p><br /><br />
5
- <h2>What is Facebook Lite and why use it? </h2>
6
- <p>Facebook Lite is a smartphone app developed by Facebook for low-powered Android phones and areas with slow or unstable Internet connections. It is a stripped-down version of the standard Facebook app that uses less data, storage space, and battery. It also loads quickly and works on all networks, including 2G. </p>
7
- <h3>Features and benefits of Facebook Lite</h3>
8
- <p>With Facebook Lite, you still have access to all the basic Facebook features, such as:</p>
9
- <ul>
10
- <li>Posting to your timeline, liking photos, commenting on posts, and editing your profile and groups</li>
11
- <li>Finding friends and family, and keeping in touch with them through messages and calls</li>
12
- <li>Sharing photos, videos, memes, and other content</li>
13
- <li>Receiving notifications when someone likes or comments on your posts</li>
14
- <li>Finding local events, RSVPing, and making plans with friends</li>
15
- <li>Following your favorite celebrities, brands, websites, artists, or sports teams</li>
16
- <li>Buying and selling locally on Facebook Marketplace</li>
17
- </ul>
18
- <p>Some advantages of using Facebook Lite are:</p>
19
- <ul>
20
- <li>It installs fast - the app is smaller than 10 MB, so it is quick to download and uses less storage space</li>
21
- <li>It works on old Android phones - you can use it on older Android phones not supported by the regular Facebook app</li>
22
- <li>It saves data - it uses less mobile data than the regular Facebook app</li>
23
- <li>It loads quickly - it is optimized for speed and performance, even on slow connections</li>
24
- <li>It works on all networks - it is designed for 2G networks and areas with poor or unstable Internet connections</li>
25
- </ul>
26
- <h3>Compatibility and requirements of Facebook Lite</h3>
27
- <p>Facebook Lite is compatible with Android devices running Android 2.3 or higher. It also supports most languages spoken around the world. You need an active Internet connection to use the app, either over Wi-Fi or mobile data. You also need a Facebook account to sign in to the app. </p>
28
- <p></p>
29
- <h2>How to download the Facebook Lite zip file for Android</h2>
30
- <p>There are different ways to download the Facebook Lite zip file for Android. Here are some of them:</p>
31
- <h3>Download from the official website</h3>
32
- <p>You can download the Facebook Lite zip file from the official Facebook website. Here are the steps:</p>
33
- <ol>
34
- <li>Go to [1](https://www.facebook.com/lite) in your browser</li>
35
- <li>Tap the "Download" button</li>
36
- <li>A pop-up window will appear asking you to save the file. Tap "OK"</li>
37
- <li>The zip file will be downloaded to your device's default download folder</li>
38
- </ol>
39
- <h3>Download from the Google Play Store</h3>
40
- <p>You can also download the Facebook Lite zip file from the Google Play Store. Here are the steps:</p>
41
- <ol>
42
- <li>Open the Google Play Store on your device</li>
43
- <li>Search for "Facebook Lite" in the search bar</li>
44
- <li>Tap the "Install" button</li>
45
- <li>Wait for the app to download and install on your device</li>
46
- <li>Open the app and sign in with your Facebook account</li>
47
- </ol>
48
- <h3>Download from third-party sources</h3>
49
- <p>You can also download the Facebook Lite zip file from third-party sources, such as APKPure, APKMirror, or Uptodown. However, you should be careful when downloading from these sources, as they may contain malware or viruses. You should also check the app's reviews and ratings before downloading. Here are the steps:</p>
50
- <ol>
51
- <li>Go to the website of the third-party source you have chosen and search for Facebook Lite</li>
52
- <li>Tap the "Download" button</li>
53
- <li>A pop-up window will appear asking you to save the file. Tap "OK"</li>
54
- <li>The zip file will be downloaded to your device's default download folder</li>
55
- </ol>
56
- <h2>How to install the Facebook Lite zip file on Android</h2>
57
- <p>After downloading the Facebook Lite zip file, you need to install it on your device. Here are the steps:</p>
58
- <h3>Enable the unknown sources option</h3>
59
- <p>Before installing the Facebook Lite zip file, you need to enable the unknown sources option on your device. This will allow you to install apps from sources other than the Google Play Store. Here are the steps:</p>
60
- <ol>
61
- <li>Go to your device settings</li>
62
- <li>Tap "Security" or "Privacy"</li>
63
- <li>Find and enable the "Unknown sources" option</li>
64
- <li>A warning message will appear. Tap "OK"</li>
65
- </ol>
66
- <h3>Extract the zip file using a file manager app</h3>
67
- <p>To install the Facebook Lite zip file, you first need to extract it using a file manager app. You can use any file manager app that supports zip files, such as ES File Explorer, ZArchiver, or RAR. Here are the steps:</p>
68
- <ol>
69
- <li>Open the file manager app on your device</li>
70
- <li>Go to the folder where you downloaded the Facebook Lite zip file</li>
71
- <li>Long-press the zip file until a menu appears</li>
72
- <li>Select "Extract" or "Unzip"</li>
73
- <li>Choose a destination folder where you want to extract the files</li>
74
- <li>Wait for the extraction process to finish</li>
75
- </ol>
76
- <h3>Install the apk file and launch the app</h3>
77
- <p>After extracting the Facebook Lite zip file, you will find an apk file in the destination folder. This is the Facebook Lite installation file. Here are the steps:</p>
78
- <ol>
79
- <li>Tap the apk file to open it</li>
80
- <li>A pop-up window will appear asking you to install the app. Tap "Install"</li>
81
- <li>Wait for the installation process to finish</li>
82
- <li>A message will appear saying the app is installed. Tap "Open"</li>
83
- <li>Sign in with your Facebook account and start using Facebook Lite</li>
84
- </ol>
85
- <h2>Conclusion and FAQs</h2>
86
- <p>In this article, we have shown you how to download the Facebook Lite zip file for Android and how to install it on your device. We have also explained what Facebook Lite is and why you should use it. We hope this guide has been helpful and informative for you. </p>
87
- <h3>Summary of the main points</h3>
88
- <ul>
89
- <li>Facebook Lite is a lighter, faster version of Facebook that works under all network conditions and on all Android devices. </li>
90
- <li>You can download the Facebook Lite zip file from different sources, such as the official website, the Google Play Store, or third-party websites. </li>
91
- <li>You need to enable the unknown sources option, extract the zip file, and install the apk file to use Facebook Lite on your device. </li>
92
- </ul>
93
- <h3>Frequently asked questions</h3>
94
- <table border="1">
95
- <tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
96
- <tr><td>What is the difference between Facebook and Facebook Lite? </td><td>Facebook Lite is a simplified version of Facebook that uses less data, storage space, and battery. It also works on older Android phones and slower Internet connections. </td></tr>
97
- <tr><td>Is Facebook Lite safe to use? </td><td>Facebook Lite is safe to use if you download it from trusted sources, such as the official website or the Google Play Store. However, you should be careful when downloading from third-party websites, as they may contain malware or viruses. </td></tr> <tr><td>How can I update Facebook Lite? </td><td>You can update Facebook Lite by going to the Google Play Store and checking for updates. You can also download the latest version of the Facebook Lite zip file from the official website or other sources and install it on your device. </td></tr>
98
-
99
- <tr><td>Can I use Facebook Lite and Facebook at the same time? </td><td>Yes, you can use Facebook Lite and Facebook at the same time on your device. However, you will need to sign in with different accounts or use different modes, such as incognito or guest mode. </td></tr>
100
- </table>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/createable_api_resource.py DELETED
@@ -1,47 +0,0 @@
1
- from openai import api_requestor, util, error
2
- from openai.api_resources.abstract.api_resource import APIResource
3
- from openai.util import ApiType
4
-
5
-
6
- class CreateableAPIResource(APIResource):
7
- plain_old_data = False
8
-
9
- @classmethod
10
- def create(
11
- cls,
12
- api_key=None,
13
- api_base=None,
14
- api_type=None,
15
- request_id=None,
16
- api_version=None,
17
- organization=None,
18
- **params,
19
- ):
20
- requestor = api_requestor.APIRequestor(
21
- api_key,
22
- api_base=api_base,
23
- api_type=api_type,
24
- api_version=api_version,
25
- organization=organization,
26
- )
27
- typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
28
-
29
- if typed_api_type == ApiType.AZURE:
30
- base = cls.class_url()
31
- url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
32
- elif typed_api_type == ApiType.OPEN_AI:
33
- url = cls.class_url()
34
- else:
35
- raise error.InvalidAPIType('Unsupported API type %s' % api_type)
36
-
37
- response, _, api_key = requestor.request(
38
- "post", url, params, request_id=request_id
39
- )
40
-
41
- return util.convert_to_openai_object(
42
- response,
43
- api_key,
44
- api_version,
45
- organization,
46
- plain_old_data=cls.plain_old_data,
47
- )
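For context on how this abstract base is consumed, here is a minimal sketch, assuming the pre-1.0 `openai` Python package, where concrete resources such as `FineTune` mix in `CreateableAPIResource`; the API key and file ID below are placeholders.

```
# Minimal sketch (assumes openai < 1.0; FineTune inherits create()
# from CreateableAPIResource as shown above).
import openai

openai.api_key = "sk-..."  # placeholder, not a real credential

# create() builds an APIRequestor, resolves the OpenAI vs Azure URL,
# POSTs the params, and wraps the JSON response in an OpenAIObject.
job = openai.FineTune.create(training_file="file-...")  # placeholder file ID
print(job["status"])
```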
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BoomerangGirl/MagicPrompt-Stable-Diffusion/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: MagicPrompt Stable Diffusion
3
- emoji: 😻
4
- colorFrom: red
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.3.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: Gustavosta/MagicPrompt-Stable-Diffusion
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Branon/oai-proxy/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: Branon
3
- emoji: 🤓
4
- colorFrom: red
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/docs/tutorials/deployment.md DELETED
@@ -1,94 +0,0 @@
1
- # Deployment
2
-
3
- ## Caffe2 Deployment
4
- We currently support converting a detectron2 model to Caffe2 format through ONNX.
5
- The converted Caffe2 model is able to run without detectron2 dependency in either Python or C++.
6
- It has a runtime optimized for CPU & mobile inference, but not for GPU inference.
7
-
8
- Caffe2 conversion requires PyTorch ≥ 1.4 and ONNX ≥ 1.6.
9
-
10
- ### Coverage
11
-
12
- It supports 3 most common meta architectures: `GeneralizedRCNN`, `RetinaNet`, `PanopticFPN`,
13
- and most official models under these 3 meta architectures.
14
-
15
- Users' custom extensions under these architectures (added through registration) are supported
16
- as long as they do not contain control flow or operators not available in Caffe2 (e.g. deformable convolution).
17
- For example, custom backbones and heads are often supported out of the box.
18
-
19
- ### Usage
20
-
21
- The conversion APIs are documented at [the API documentation](../modules/export.html).
22
- We provide a tool, `caffe2_converter.py` as an example that uses
23
- these APIs to convert a standard model.
24
-
25
- To convert an official Mask R-CNN trained on COCO, first
26
- [prepare the COCO dataset](../../datasets/), then pick the model from [Model Zoo](../../MODEL_ZOO.md), and run:
27
- ```
28
- cd tools/deploy/ && ./caffe2_converter.py --config-file ../../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
29
- --output ./caffe2_model --run-eval \
30
- MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl \
31
- MODEL.DEVICE cpu
32
- ```
33
-
34
- Note that:
35
- 1. The conversion needs valid sample inputs & weights to trace the model. That's why the script requires the dataset.
36
- You can modify the script to obtain sample inputs in other ways.
37
- 2. GPU conversion is supported only with Pytorch's master. So we use `MODEL.DEVICE cpu`.
38
- 3. With the `--run-eval` flag, it will evaluate the converted models to verify its accuracy.
39
- The accuracy is typically slightly different (within 0.1 AP) from PyTorch due to
40
- numerical precisions between different implementations.
41
- It's recommended to always verify the accuracy in case your custom model is not supported by the
42
- conversion.
43
-
44
- The converted model is available at the specified `caffe2_model/` directory. Two files `model.pb`
45
- and `model_init.pb` that contain network structure and network parameters are necessary for deployment.
46
- These files can then be loaded in C++ or Python using Caffe2's APIs.
47
-
48
- The script generates `model.svg` file which contains a visualization of the network.
49
- You can also load `model.pb` to tools such as [netron](https://github.com/lutzroeder/netron) to visualize it.
50
-
51
- ### Use the model in C++/Python
52
-
53
- The model can be loaded in C++. An example [caffe2_mask_rcnn.cpp](../../tools/deploy/) is given,
54
- which performs CPU inference using `COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x`.
55
-
56
- The C++ code needs to be built with:
57
- * `libtorch.so`, `libc10.so`
58
- * gflags, glog, opencv
59
- * protobuf headers that match the version of your caffe2
60
- * MKL headers if caffe2 is built with MKL
61
- * `-D_GLIBCXX_USE_CXX11_ABI=` equals `torch._C._GLIBCXX_USE_CXX11_ABI`
62
-
63
- As an example, the following works inside official detectron2 docker:
64
- ```
65
- apt install libgflags-dev libgoogle-glog-dev
66
- pip install mkl-include
67
- wget https://github.com/protocolbuffers/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz
68
- tar xf protobuf-cpp-3.6.1.tar.gz
69
- export TORCH_ROOT=/home/appuser/.local/lib/python3.6/site-packages/torch/
70
- g++ -O2 caffe2_mask_rcnn.cpp `pkg-config --libs --cflags opencv` -Iprotobuf-3.6.1/src/ \
71
- -lgflags -lglog -I$TORCH_ROOT/include -L$TORCH_ROOT/lib -lc10 -ltorch \
72
- -I/home/appuser/.local/include -D_GLIBCXX_USE_CXX11_ABI=0 -o caffe2_mask_rcnn
73
-
74
- export LD_LIBRARY_PATH=$TORCH_ROOT/lib
75
- ./caffe2_mask_rcnn --predict_net=./model.pb --init_net=./model_init.pb --input=input.jpg
76
- ```
77
-
78
- Note that:
79
-
80
- * All converted models (the .pb files) take two input tensors:
81
- "data" is an NCHW image, and "im_info" is an Nx3 tensor consisting of (height, width, 1.0) for
82
- each image (the shape of "data" might be larger than that in "im_info" due to padding).
83
-
84
- * The converted models do not contain post-processing operations that
85
- transform raw layer outputs into formatted predictions.
86
- The example only produces raw outputs (28x28 masks) from the final
87
- layers that are not post-processed, because in actual deployment, an application often needs
88
- its custom lightweight post-processing (e.g. full-image masks for every detected object is often not necessary).
89
-
90
- We also provide a python wrapper around the converted model, in the
91
- [Caffe2Model.__call__](../modules/export.html#detectron2.export.Caffe2Model.__call__) method.
92
- This method has an interface that's identical to the [pytorch versions of models](models.html),
93
- and it internally applies pre/post-processing code to match the formats.
94
- They can serve as a reference for pre/post-processing in actual deployment.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChallengeHub/Chinese-LangChain/clc/langchain_application.py DELETED
@@ -1,97 +0,0 @@
1
- #!/usr/bin/env python
2
- # -*- coding:utf-8 _*-
3
- """
4
- @author:quincy qiang
5
- @license: Apache Licence
6
- @file: model.py
7
- @time: 2023/04/17
8
- @contact: [email protected]
9
- @software: PyCharm
10
- @description: coding..
11
- """
12
- from langchain.chains import RetrievalQA
13
- from langchain.prompts.prompt import PromptTemplate
14
-
15
- from clc.config import LangChainCFG
16
- from clc.gpt_service import ChatGLMService
17
- from clc.source_service import SourceService
18
-
19
-
20
- class LangChainApplication(object):
21
- def __init__(self, config):
22
- self.config = config
23
- self.llm_service = ChatGLMService()
24
- self.llm_service.load_model(model_name_or_path=self.config.llm_model_name)
25
- self.source_service = SourceService(config)
26
-
27
- # if self.config.kg_vector_stores is None:
28
- # print("init a source vector store")
29
- # self.source_service.init_source_vector()
30
- # else:
31
- # print("load zh_wikipedia source vector store ")
32
- # try:
33
- # self.source_service.load_vector_store(self.config.kg_vector_stores['初始化知识库'])
34
- # except Exception as e:
35
- # self.source_service.init_source_vector()
36
-
37
- def get_knowledge_based_answer(self, query,
38
- history_len=5,
39
- temperature=0.1,
40
- top_p=0.9,
41
- top_k=4,
42
- web_content='',
43
- chat_history=[]):
44
- if web_content:
45
- prompt_template = f"""基于以下已知信息,简洁和专业的来回答用户的问题。
46
- 如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
47
- 已知网络检索内容:{web_content}""" + """
48
- 已知内容:
49
- {context}
50
- 问题:
51
- {question}"""
52
- else:
53
- prompt_template = """基于以下已知信息,简洁和专业的来回答用户的问题。
54
- 如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
55
- 已知内容:
56
- {context}
57
- 问题:
58
- {question}"""
59
- prompt = PromptTemplate(template=prompt_template,
60
- input_variables=["context", "question"])
61
- self.llm_service.history = chat_history[-history_len:] if history_len > 0 else []
62
-
63
- self.llm_service.temperature = temperature
64
- self.llm_service.top_p = top_p
65
-
66
- knowledge_chain = RetrievalQA.from_llm(
67
- llm=self.llm_service,
68
- retriever=self.source_service.vector_store.as_retriever(
69
- search_kwargs={"k": top_k}),
70
- prompt=prompt)
71
- knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
72
- input_variables=["page_content"], template="{page_content}")
73
-
74
- knowledge_chain.return_source_documents = True
75
-
76
- result = knowledge_chain({"query": query})
77
- return result
78
-
79
- def get_llm_answer(self, query='', web_content=''):
80
- if web_content:
81
- prompt = f'基于网络检索内容:{web_content},回答以下问题{query}'
82
- else:
83
- prompt = query
84
- result = self.llm_service._call(prompt)
85
- return result
86
-
87
-
88
- if __name__ == '__main__':
89
- config = LangChainCFG()
90
- application = LangChainApplication(config)
91
- # result = application.get_knowledge_based_answer('马保国是谁')
92
- # print(result)
93
- # application.source_service.add_document('/home/searchgpt/yq/Knowledge-ChatGLM/docs/added/马保国.txt')
94
- # result = application.get_knowledge_based_answer('马保国是谁')
95
- # print(result)
96
- result = application.get_llm_answer('马保国是谁')
97
- print(result)
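As a usage note: the `__main__` block above only exercises `get_llm_answer`. A retrieval-augmented query would look roughly like the sketch below; the document path and question are hypothetical, and it assumes a vector store has already been populated via `SourceService`.

```
# Sketch: knowledge-based query (assumes the vector store is populated).
config = LangChainCFG()
application = LangChainApplication(config)

# Index a document first (path is hypothetical).
application.source_service.add_document("docs/added/example.txt")

result = application.get_knowledge_based_answer(
    query="What does the document say?",
    top_k=4,
    chat_history=[],
)
print(result["result"])            # the generated answer
print(result["source_documents"])  # retrieved context chunks
```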
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/ChrisCaviar/ControlNet-v1-1/style.css DELETED
@@ -1,3 +0,0 @@
1
- h1 {
2
- text-align: center;
3
- }
 
 
 
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/utils/cv2_util.py DELETED
@@ -1,24 +0,0 @@
1
- """
2
- Module for cv2 utility functions and maintaining version compatibility
3
- between 3.x and 4.x
4
- """
5
- import cv2
6
-
7
-
8
- def findContours(*args, **kwargs):
9
- """
10
- Wraps cv2.findContours to maintain compatibility between versions
11
- 3 and 4
12
-
13
- Returns:
14
- contours, hierarchy
15
- """
16
- if cv2.__version__.startswith('4'):
17
- contours, hierarchy = cv2.findContours(*args, **kwargs)
18
- elif cv2.__version__.startswith('3'):
19
- _, contours, hierarchy = cv2.findContours(*args, **kwargs)
20
- else:
21
- raise AssertionError(
22
- 'cv2 must be either version 3 or 4 to call this method')
23
-
24
- return contours, hierarchy
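A short usage sketch of the wrapper (the input image path is illustrative):

```
# Sketch: version-agnostic contour extraction with the wrapper above.
import cv2

img = cv2.imread("shapes.png", cv2.IMREAD_GRAYSCALE)  # illustrative path
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

contours, hierarchy = findContours(
    binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
)
print(f"found {len(contours)} contours")
```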
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/web_urldispatcher.py DELETED
@@ -1,1220 +0,0 @@
1
- import abc
2
- import asyncio
3
- import base64
4
- import hashlib
5
- import inspect
6
- import keyword
7
- import os
8
- import re
9
- import warnings
10
- from contextlib import contextmanager
11
- from functools import wraps
12
- from pathlib import Path
13
- from types import MappingProxyType
14
- from typing import (
15
- TYPE_CHECKING,
16
- Any,
17
- Awaitable,
18
- Callable,
19
- Container,
20
- Dict,
21
- Generator,
22
- Iterable,
23
- Iterator,
24
- List,
25
- Mapping,
26
- Optional,
27
- Pattern,
28
- Set,
29
- Sized,
30
- Tuple,
31
- Type,
32
- Union,
33
- cast,
34
- )
35
-
36
- from yarl import URL, __version__ as yarl_version # type: ignore[attr-defined]
37
-
38
- from . import hdrs
39
- from .abc import AbstractMatchInfo, AbstractRouter, AbstractView
40
- from .helpers import DEBUG
41
- from .http import HttpVersion11
42
- from .typedefs import Final, Handler, PathLike, TypedDict
43
- from .web_exceptions import (
44
- HTTPException,
45
- HTTPExpectationFailed,
46
- HTTPForbidden,
47
- HTTPMethodNotAllowed,
48
- HTTPNotFound,
49
- )
50
- from .web_fileresponse import FileResponse
51
- from .web_request import Request
52
- from .web_response import Response, StreamResponse
53
- from .web_routedef import AbstractRouteDef
54
-
55
- __all__ = (
56
- "UrlDispatcher",
57
- "UrlMappingMatchInfo",
58
- "AbstractResource",
59
- "Resource",
60
- "PlainResource",
61
- "DynamicResource",
62
- "AbstractRoute",
63
- "ResourceRoute",
64
- "StaticResource",
65
- "View",
66
- )
67
-
68
-
69
- if TYPE_CHECKING: # pragma: no cover
70
- from .web_app import Application
71
-
72
- BaseDict = Dict[str, str]
73
- else:
74
- BaseDict = dict
75
-
76
- YARL_VERSION: Final[Tuple[int, ...]] = tuple(map(int, yarl_version.split(".")[:2]))
77
-
78
- HTTP_METHOD_RE: Final[Pattern[str]] = re.compile(
79
- r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$"
80
- )
81
- ROUTE_RE: Final[Pattern[str]] = re.compile(
82
- r"(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})"
83
- )
84
- PATH_SEP: Final[str] = re.escape("/")
85
-
86
-
87
- _ExpectHandler = Callable[[Request], Awaitable[None]]
88
- _Resolve = Tuple[Optional["UrlMappingMatchInfo"], Set[str]]
89
-
90
-
91
- class _InfoDict(TypedDict, total=False):
92
- path: str
93
-
94
- formatter: str
95
- pattern: Pattern[str]
96
-
97
- directory: Path
98
- prefix: str
99
- routes: Mapping[str, "AbstractRoute"]
100
-
101
- app: "Application"
102
-
103
- domain: str
104
-
105
- rule: "AbstractRuleMatching"
106
-
107
- http_exception: HTTPException
108
-
109
-
110
- class AbstractResource(Sized, Iterable["AbstractRoute"]):
111
- def __init__(self, *, name: Optional[str] = None) -> None:
112
- self._name = name
113
-
114
- @property
115
- def name(self) -> Optional[str]:
116
- return self._name
117
-
118
- @property
119
- @abc.abstractmethod
120
- def canonical(self) -> str:
121
- """Exposes the resource's canonical path.
122
-
123
- For example '/foo/bar/{name}'
124
-
125
- """
126
-
127
- @abc.abstractmethod # pragma: no branch
128
- def url_for(self, **kwargs: str) -> URL:
129
- """Construct url for resource with additional params."""
130
-
131
- @abc.abstractmethod # pragma: no branch
132
- async def resolve(self, request: Request) -> _Resolve:
133
- """Resolve resource.
134
-
135
- Return (UrlMappingMatchInfo, allowed_methods) pair.
136
- """
137
-
138
- @abc.abstractmethod
139
- def add_prefix(self, prefix: str) -> None:
140
- """Add a prefix to processed URLs.
141
-
142
- Required for subapplications support.
143
- """
144
-
145
- @abc.abstractmethod
146
- def get_info(self) -> _InfoDict:
147
- """Return a dict with additional info useful for introspection"""
148
-
149
- def freeze(self) -> None:
150
- pass
151
-
152
- @abc.abstractmethod
153
- def raw_match(self, path: str) -> bool:
154
- """Perform a raw match against path"""
155
-
156
-
157
- class AbstractRoute(abc.ABC):
158
- def __init__(
159
- self,
160
- method: str,
161
- handler: Union[Handler, Type[AbstractView]],
162
- *,
163
- expect_handler: Optional[_ExpectHandler] = None,
164
- resource: Optional[AbstractResource] = None,
165
- ) -> None:
166
-
167
- if expect_handler is None:
168
- expect_handler = _default_expect_handler
169
-
170
- assert asyncio.iscoroutinefunction(
171
- expect_handler
172
- ), f"Coroutine is expected, got {expect_handler!r}"
173
-
174
- method = method.upper()
175
- if not HTTP_METHOD_RE.match(method):
176
- raise ValueError(f"{method} is not allowed HTTP method")
177
-
178
- assert callable(handler), handler
179
- if asyncio.iscoroutinefunction(handler):
180
- pass
181
- elif inspect.isgeneratorfunction(handler):
182
- warnings.warn(
183
- "Bare generators are deprecated, " "use @coroutine wrapper",
184
- DeprecationWarning,
185
- )
186
- elif isinstance(handler, type) and issubclass(handler, AbstractView):
187
- pass
188
- else:
189
- warnings.warn(
190
- "Bare functions are deprecated, " "use async ones", DeprecationWarning
191
- )
192
-
193
- @wraps(handler)
194
- async def handler_wrapper(request: Request) -> StreamResponse:
195
- result = old_handler(request)
196
- if asyncio.iscoroutine(result):
197
- return await result
198
- return result # type: ignore[return-value]
199
-
200
- old_handler = handler
201
- handler = handler_wrapper
202
-
203
- self._method = method
204
- self._handler = handler
205
- self._expect_handler = expect_handler
206
- self._resource = resource
207
-
208
- @property
209
- def method(self) -> str:
210
- return self._method
211
-
212
- @property
213
- def handler(self) -> Handler:
214
- return self._handler
215
-
216
- @property
217
- @abc.abstractmethod
218
- def name(self) -> Optional[str]:
219
- """Optional route's name, always equals to resource's name."""
220
-
221
- @property
222
- def resource(self) -> Optional[AbstractResource]:
223
- return self._resource
224
-
225
- @abc.abstractmethod
226
- def get_info(self) -> _InfoDict:
227
- """Return a dict with additional info useful for introspection"""
228
-
229
- @abc.abstractmethod # pragma: no branch
230
- def url_for(self, *args: str, **kwargs: str) -> URL:
231
- """Construct url for route with additional params."""
232
-
233
- async def handle_expect_header(self, request: Request) -> None:
234
- await self._expect_handler(request)
235
-
236
-
237
- class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo):
238
- def __init__(self, match_dict: Dict[str, str], route: AbstractRoute):
239
- super().__init__(match_dict)
240
- self._route = route
241
- self._apps: List[Application] = []
242
- self._current_app: Optional[Application] = None
243
- self._frozen = False
244
-
245
- @property
246
- def handler(self) -> Handler:
247
- return self._route.handler
248
-
249
- @property
250
- def route(self) -> AbstractRoute:
251
- return self._route
252
-
253
- @property
254
- def expect_handler(self) -> _ExpectHandler:
255
- return self._route.handle_expect_header
256
-
257
- @property
258
- def http_exception(self) -> Optional[HTTPException]:
259
- return None
260
-
261
- def get_info(self) -> _InfoDict: # type: ignore[override]
262
- return self._route.get_info()
263
-
264
- @property
265
- def apps(self) -> Tuple["Application", ...]:
266
- return tuple(self._apps)
267
-
268
- def add_app(self, app: "Application") -> None:
269
- if self._frozen:
270
- raise RuntimeError("Cannot change apps stack after .freeze() call")
271
- if self._current_app is None:
272
- self._current_app = app
273
- self._apps.insert(0, app)
274
-
275
- @property
276
- def current_app(self) -> "Application":
277
- app = self._current_app
278
- assert app is not None
279
- return app
280
-
281
- @contextmanager
282
- def set_current_app(self, app: "Application") -> Generator[None, None, None]:
283
- if DEBUG: # pragma: no cover
284
- if app not in self._apps:
285
- raise RuntimeError(
286
- "Expected one of the following apps {!r}, got {!r}".format(
287
- self._apps, app
288
- )
289
- )
290
- prev = self._current_app
291
- self._current_app = app
292
- try:
293
- yield
294
- finally:
295
- self._current_app = prev
296
-
297
- def freeze(self) -> None:
298
- self._frozen = True
299
-
300
- def __repr__(self) -> str:
301
- return f"<MatchInfo {super().__repr__()}: {self._route}>"
302
-
303
-
304
- class MatchInfoError(UrlMappingMatchInfo):
305
- def __init__(self, http_exception: HTTPException) -> None:
306
- self._exception = http_exception
307
- super().__init__({}, SystemRoute(self._exception))
308
-
309
- @property
310
- def http_exception(self) -> HTTPException:
311
- return self._exception
312
-
313
- def __repr__(self) -> str:
314
- return "<MatchInfoError {}: {}>".format(
315
- self._exception.status, self._exception.reason
316
- )
317
-
318
-
319
- async def _default_expect_handler(request: Request) -> None:
320
- """Default handler for Expect header.
321
-
322
- Just send "100 Continue" to client.
323
- raise HTTPExpectationFailed if value of header is not "100-continue"
324
- """
325
- expect = request.headers.get(hdrs.EXPECT, "")
326
- if request.version == HttpVersion11:
327
- if expect.lower() == "100-continue":
328
- await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
329
- else:
330
- raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
331
-
332
-
333
- class Resource(AbstractResource):
334
- def __init__(self, *, name: Optional[str] = None) -> None:
335
- super().__init__(name=name)
336
- self._routes: List[ResourceRoute] = []
337
-
338
- def add_route(
339
- self,
340
- method: str,
341
- handler: Union[Type[AbstractView], Handler],
342
- *,
343
- expect_handler: Optional[_ExpectHandler] = None,
344
- ) -> "ResourceRoute":
345
-
346
- for route_obj in self._routes:
347
- if route_obj.method == method or route_obj.method == hdrs.METH_ANY:
348
- raise RuntimeError(
349
- "Added route will never be executed, "
350
- "method {route.method} is already "
351
- "registered".format(route=route_obj)
352
- )
353
-
354
- route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler)
355
- self.register_route(route_obj)
356
- return route_obj
357
-
358
- def register_route(self, route: "ResourceRoute") -> None:
359
- assert isinstance(
360
- route, ResourceRoute
361
- ), f"Instance of Route class is required, got {route!r}"
362
- self._routes.append(route)
363
-
364
- async def resolve(self, request: Request) -> _Resolve:
365
- allowed_methods: Set[str] = set()
366
-
367
- match_dict = self._match(request.rel_url.raw_path)
368
- if match_dict is None:
369
- return None, allowed_methods
370
-
371
- for route_obj in self._routes:
372
- route_method = route_obj.method
373
- allowed_methods.add(route_method)
374
-
375
- if route_method == request.method or route_method == hdrs.METH_ANY:
376
- return (UrlMappingMatchInfo(match_dict, route_obj), allowed_methods)
377
- else:
378
- return None, allowed_methods
379
-
380
- @abc.abstractmethod
381
- def _match(self, path: str) -> Optional[Dict[str, str]]:
382
- pass # pragma: no cover
383
-
384
- def __len__(self) -> int:
385
- return len(self._routes)
386
-
387
- def __iter__(self) -> Iterator[AbstractRoute]:
388
- return iter(self._routes)
389
-
390
- # TODO: implement all abstract methods
391
-
392
-
393
- class PlainResource(Resource):
394
- def __init__(self, path: str, *, name: Optional[str] = None) -> None:
395
- super().__init__(name=name)
396
- assert not path or path.startswith("/")
397
- self._path = path
398
-
399
- @property
400
- def canonical(self) -> str:
401
- return self._path
402
-
403
- def freeze(self) -> None:
404
- if not self._path:
405
- self._path = "/"
406
-
407
- def add_prefix(self, prefix: str) -> None:
408
- assert prefix.startswith("/")
409
- assert not prefix.endswith("/")
410
- assert len(prefix) > 1
411
- self._path = prefix + self._path
412
-
413
- def _match(self, path: str) -> Optional[Dict[str, str]]:
414
- # string comparison is about 10 times faster than regexp matching
415
- if self._path == path:
416
- return {}
417
- else:
418
- return None
419
-
420
- def raw_match(self, path: str) -> bool:
421
- return self._path == path
422
-
423
- def get_info(self) -> _InfoDict:
424
- return {"path": self._path}
425
-
426
- def url_for(self) -> URL: # type: ignore[override]
427
- return URL.build(path=self._path, encoded=True)
428
-
429
- def __repr__(self) -> str:
430
- name = "'" + self.name + "' " if self.name is not None else ""
431
- return f"<PlainResource {name} {self._path}>"
432
-
433
-
434
- class DynamicResource(Resource):
435
-
436
- DYN = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*)\}")
437
- DYN_WITH_RE = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}")
438
- GOOD = r"[^{}/]+"
439
-
440
- def __init__(self, path: str, *, name: Optional[str] = None) -> None:
441
- super().__init__(name=name)
442
- pattern = ""
443
- formatter = ""
444
- for part in ROUTE_RE.split(path):
445
- match = self.DYN.fullmatch(part)
446
- if match:
447
- pattern += "(?P<{}>{})".format(match.group("var"), self.GOOD)
448
- formatter += "{" + match.group("var") + "}"
449
- continue
450
-
451
- match = self.DYN_WITH_RE.fullmatch(part)
452
- if match:
453
- pattern += "(?P<{var}>{re})".format(**match.groupdict())
454
- formatter += "{" + match.group("var") + "}"
455
- continue
456
-
457
- if "{" in part or "}" in part:
458
- raise ValueError(f"Invalid path '{path}'['{part}']")
459
-
460
- part = _requote_path(part)
461
- formatter += part
462
- pattern += re.escape(part)
463
-
464
- try:
465
- compiled = re.compile(pattern)
466
- except re.error as exc:
467
- raise ValueError(f"Bad pattern '{pattern}': {exc}") from None
468
- assert compiled.pattern.startswith(PATH_SEP)
469
- assert formatter.startswith("/")
470
- self._pattern = compiled
471
- self._formatter = formatter
472
-
473
- @property
474
- def canonical(self) -> str:
475
- return self._formatter
476
-
477
- def add_prefix(self, prefix: str) -> None:
478
- assert prefix.startswith("/")
479
- assert not prefix.endswith("/")
480
- assert len(prefix) > 1
481
- self._pattern = re.compile(re.escape(prefix) + self._pattern.pattern)
482
- self._formatter = prefix + self._formatter
483
-
484
- def _match(self, path: str) -> Optional[Dict[str, str]]:
485
- match = self._pattern.fullmatch(path)
486
- if match is None:
487
- return None
488
- else:
489
- return {
490
- key: _unquote_path(value) for key, value in match.groupdict().items()
491
- }
492
-
493
- def raw_match(self, path: str) -> bool:
494
- return self._formatter == path
495
-
496
- def get_info(self) -> _InfoDict:
497
- return {"formatter": self._formatter, "pattern": self._pattern}
498
-
499
- def url_for(self, **parts: str) -> URL:
500
- url = self._formatter.format_map({k: _quote_path(v) for k, v in parts.items()})
501
- return URL.build(path=url, encoded=True)
502
-
503
- def __repr__(self) -> str:
504
- name = "'" + self.name + "' " if self.name is not None else ""
505
- return "<DynamicResource {name} {formatter}>".format(
506
- name=name, formatter=self._formatter
507
- )
508
-
509
-
510
- class PrefixResource(AbstractResource):
511
- def __init__(self, prefix: str, *, name: Optional[str] = None) -> None:
512
- assert not prefix or prefix.startswith("/"), prefix
513
- assert prefix in ("", "/") or not prefix.endswith("/"), prefix
514
- super().__init__(name=name)
515
- self._prefix = _requote_path(prefix)
516
- self._prefix2 = self._prefix + "/"
517
-
518
- @property
519
- def canonical(self) -> str:
520
- return self._prefix
521
-
522
- def add_prefix(self, prefix: str) -> None:
523
- assert prefix.startswith("/")
524
- assert not prefix.endswith("/")
525
- assert len(prefix) > 1
526
- self._prefix = prefix + self._prefix
527
- self._prefix2 = self._prefix + "/"
528
-
529
- def raw_match(self, prefix: str) -> bool:
530
- return False
531
-
532
- # TODO: impl missing abstract methods
533
-
534
-
535
- class StaticResource(PrefixResource):
536
- VERSION_KEY = "v"
537
-
538
- def __init__(
539
- self,
540
- prefix: str,
541
- directory: PathLike,
542
- *,
543
- name: Optional[str] = None,
544
- expect_handler: Optional[_ExpectHandler] = None,
545
- chunk_size: int = 256 * 1024,
546
- show_index: bool = False,
547
- follow_symlinks: bool = False,
548
- append_version: bool = False,
549
- ) -> None:
550
- super().__init__(prefix, name=name)
551
- try:
552
- directory = Path(directory)
553
- if str(directory).startswith("~"):
554
- directory = Path(os.path.expanduser(str(directory)))
555
- directory = directory.resolve()
556
- if not directory.is_dir():
557
- raise ValueError("Not a directory")
558
- except (FileNotFoundError, ValueError) as error:
559
- raise ValueError(f"No directory exists at '{directory}'") from error
560
- self._directory = directory
561
- self._show_index = show_index
562
- self._chunk_size = chunk_size
563
- self._follow_symlinks = follow_symlinks
564
- self._expect_handler = expect_handler
565
- self._append_version = append_version
566
-
567
- self._routes = {
568
- "GET": ResourceRoute(
569
- "GET", self._handle, self, expect_handler=expect_handler
570
- ),
571
- "HEAD": ResourceRoute(
572
- "HEAD", self._handle, self, expect_handler=expect_handler
573
- ),
574
- }
575
-
576
- def url_for( # type: ignore[override]
577
- self,
578
- *,
579
- filename: Union[str, Path],
580
- append_version: Optional[bool] = None,
581
- ) -> URL:
582
- if append_version is None:
583
- append_version = self._append_version
584
- if isinstance(filename, Path):
585
- filename = str(filename)
586
- filename = filename.lstrip("/")
587
-
588
- url = URL.build(path=self._prefix, encoded=True)
589
- # filename is not encoded
590
- if YARL_VERSION < (1, 6):
591
- url = url / filename.replace("%", "%25")
592
- else:
593
- url = url / filename
594
-
595
- if append_version:
596
- try:
597
- filepath = self._directory.joinpath(filename).resolve()
598
- if not self._follow_symlinks:
599
- filepath.relative_to(self._directory)
600
- except (ValueError, FileNotFoundError):
601
- # ValueError for case when path point to symlink
602
- # with follow_symlinks is False
603
- return url # relatively safe
604
- if filepath.is_file():
605
- # TODO cache file content
606
- # with file watcher for cache invalidation
607
- with filepath.open("rb") as f:
608
- file_bytes = f.read()
609
- h = self._get_file_hash(file_bytes)
610
- url = url.with_query({self.VERSION_KEY: h})
611
- return url
612
- return url
613
-
614
- @staticmethod
615
- def _get_file_hash(byte_array: bytes) -> str:
616
- m = hashlib.sha256() # todo sha256 can be configurable param
617
- m.update(byte_array)
618
- b64 = base64.urlsafe_b64encode(m.digest())
619
- return b64.decode("ascii")
620
-
621
- def get_info(self) -> _InfoDict:
622
- return {
623
- "directory": self._directory,
624
- "prefix": self._prefix,
625
- "routes": self._routes,
626
- }
627
-
628
- def set_options_route(self, handler: Handler) -> None:
629
- if "OPTIONS" in self._routes:
630
- raise RuntimeError("OPTIONS route was set already")
631
- self._routes["OPTIONS"] = ResourceRoute(
632
- "OPTIONS", handler, self, expect_handler=self._expect_handler
633
- )
634
-
635
- async def resolve(self, request: Request) -> _Resolve:
636
- path = request.rel_url.raw_path
637
- method = request.method
638
- allowed_methods = set(self._routes)
639
- if not path.startswith(self._prefix2) and path != self._prefix:
640
- return None, set()
641
-
642
- if method not in allowed_methods:
643
- return None, allowed_methods
644
-
645
- match_dict = {"filename": _unquote_path(path[len(self._prefix) + 1 :])}
646
- return (UrlMappingMatchInfo(match_dict, self._routes[method]), allowed_methods)
647
-
648
- def __len__(self) -> int:
649
- return len(self._routes)
650
-
651
- def __iter__(self) -> Iterator[AbstractRoute]:
652
- return iter(self._routes.values())
653
-
654
- async def _handle(self, request: Request) -> StreamResponse:
655
- rel_url = request.match_info["filename"]
656
- try:
657
- filename = Path(rel_url)
658
- if filename.anchor:
659
- # rel_url is an absolute name like
660
- # /static/\\machine_name\c$ or /static/D:\path
661
- # where the static dir is totally different
662
- raise HTTPForbidden()
663
- filepath = self._directory.joinpath(filename).resolve()
664
- if not self._follow_symlinks:
665
- filepath.relative_to(self._directory)
666
- except (ValueError, FileNotFoundError) as error:
667
- # relatively safe
668
- raise HTTPNotFound() from error
669
- except HTTPForbidden:
670
- raise
671
- except Exception as error:
672
- # perm error or other kind!
673
- request.app.logger.exception(error)
674
- raise HTTPNotFound() from error
675
-
676
- # on opening a dir, load its contents if allowed
677
- if filepath.is_dir():
678
- if self._show_index:
679
- try:
680
- return Response(
681
- text=self._directory_as_html(filepath), content_type="text/html"
682
- )
683
- except PermissionError:
684
- raise HTTPForbidden()
685
- else:
686
- raise HTTPForbidden()
687
- elif filepath.is_file():
688
- return FileResponse(filepath, chunk_size=self._chunk_size)
689
- else:
690
- raise HTTPNotFound
691
-
692
- def _directory_as_html(self, filepath: Path) -> str:
693
- # returns directory's index as html
694
-
695
- # sanity check
696
- assert filepath.is_dir()
697
-
698
- relative_path_to_dir = filepath.relative_to(self._directory).as_posix()
699
- index_of = f"Index of /{relative_path_to_dir}"
700
- h1 = f"<h1>{index_of}</h1>"
701
-
702
- index_list = []
703
- dir_index = filepath.iterdir()
704
- for _file in sorted(dir_index):
705
- # show file url as relative to static path
706
- rel_path = _file.relative_to(self._directory).as_posix()
707
- file_url = self._prefix + "/" + rel_path
708
-
709
- # if file is a directory, add '/' to the end of the name
710
- if _file.is_dir():
711
- file_name = f"{_file.name}/"
712
- else:
713
- file_name = _file.name
714
-
715
- index_list.append(
716
- '<li><a href="{url}">{name}</a></li>'.format(
717
- url=file_url, name=file_name
718
- )
719
- )
720
- ul = "<ul>\n{}\n</ul>".format("\n".join(index_list))
721
- body = f"<body>\n{h1}\n{ul}\n</body>"
722
-
723
- head_str = f"<head>\n<title>{index_of}</title>\n</head>"
724
- html = f"<html>\n{head_str}\n{body}\n</html>"
725
-
726
- return html
727
-
728
- def __repr__(self) -> str:
729
- name = "'" + self.name + "'" if self.name is not None else ""
730
- return "<StaticResource {name} {path} -> {directory!r}>".format(
731
- name=name, path=self._prefix, directory=self._directory
732
- )
733
-
734
-
735
- class PrefixedSubAppResource(PrefixResource):
736
- def __init__(self, prefix: str, app: "Application") -> None:
737
- super().__init__(prefix)
738
- self._app = app
739
- for resource in app.router.resources():
740
- resource.add_prefix(prefix)
741
-
742
- def add_prefix(self, prefix: str) -> None:
743
- super().add_prefix(prefix)
744
- for resource in self._app.router.resources():
745
- resource.add_prefix(prefix)
746
-
747
- def url_for(self, *args: str, **kwargs: str) -> URL:
748
- raise RuntimeError(".url_for() is not supported " "by sub-application root")
749
-
750
- def get_info(self) -> _InfoDict:
751
- return {"app": self._app, "prefix": self._prefix}
752
-
753
- async def resolve(self, request: Request) -> _Resolve:
754
- if (
755
- not request.url.raw_path.startswith(self._prefix2)
756
- and request.url.raw_path != self._prefix
757
- ):
758
- return None, set()
759
- match_info = await self._app.router.resolve(request)
760
- match_info.add_app(self._app)
761
- if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
762
- methods = match_info.http_exception.allowed_methods
763
- else:
764
- methods = set()
765
- return match_info, methods
766
-
767
- def __len__(self) -> int:
768
- return len(self._app.router.routes())
769
-
770
- def __iter__(self) -> Iterator[AbstractRoute]:
771
- return iter(self._app.router.routes())
772
-
773
- def __repr__(self) -> str:
774
- return "<PrefixedSubAppResource {prefix} -> {app!r}>".format(
775
- prefix=self._prefix, app=self._app
776
- )
777
-
778
-
779
- class AbstractRuleMatching(abc.ABC):
780
- @abc.abstractmethod # pragma: no branch
781
- async def match(self, request: Request) -> bool:
782
- """Return bool if the request satisfies the criteria"""
783
-
784
- @abc.abstractmethod # pragma: no branch
785
- def get_info(self) -> _InfoDict:
786
- """Return a dict with additional info useful for introspection"""
787
-
788
- @property
789
- @abc.abstractmethod # pragma: no branch
790
- def canonical(self) -> str:
791
- """Return a str"""
792
-
793
-
794
- class Domain(AbstractRuleMatching):
795
- re_part = re.compile(r"(?!-)[a-z\d-]{1,63}(?<!-)")
796
-
797
- def __init__(self, domain: str) -> None:
798
- super().__init__()
799
- self._domain = self.validation(domain)
800
-
801
- @property
802
- def canonical(self) -> str:
803
- return self._domain
804
-
805
- def validation(self, domain: str) -> str:
806
- if not isinstance(domain, str):
807
- raise TypeError("Domain must be str")
808
- domain = domain.rstrip(".").lower()
809
- if not domain:
810
- raise ValueError("Domain cannot be empty")
811
- elif "://" in domain:
812
- raise ValueError("Scheme not supported")
813
- url = URL("http://" + domain)
814
- assert url.raw_host is not None
815
- if not all(self.re_part.fullmatch(x) for x in url.raw_host.split(".")):
816
- raise ValueError("Domain not valid")
817
- if url.port == 80:
818
- return url.raw_host
819
- return f"{url.raw_host}:{url.port}"
820
-
821
- async def match(self, request: Request) -> bool:
822
- host = request.headers.get(hdrs.HOST)
823
- if not host:
824
- return False
825
- return self.match_domain(host)
826
-
827
- def match_domain(self, host: str) -> bool:
828
- return host.lower() == self._domain
829
-
830
- def get_info(self) -> _InfoDict:
831
- return {"domain": self._domain}
832
-
833
-
834
- class MaskDomain(Domain):
835
- re_part = re.compile(r"(?!-)[a-z\d\*-]{1,63}(?<!-)")
836
-
837
- def __init__(self, domain: str) -> None:
838
- super().__init__(domain)
839
- mask = self._domain.replace(".", r"\.").replace("*", ".*")
840
- self._mask = re.compile(mask)
841
-
842
- @property
843
- def canonical(self) -> str:
844
- return self._mask.pattern
845
-
846
- def match_domain(self, host: str) -> bool:
847
- return self._mask.fullmatch(host) is not None
848
-
849
-
850
- class MatchedSubAppResource(PrefixedSubAppResource):
851
- def __init__(self, rule: AbstractRuleMatching, app: "Application") -> None:
852
- AbstractResource.__init__(self)
853
- self._prefix = ""
854
- self._app = app
855
- self._rule = rule
856
-
857
- @property
858
- def canonical(self) -> str:
859
- return self._rule.canonical
860
-
861
- def get_info(self) -> _InfoDict:
862
- return {"app": self._app, "rule": self._rule}
863
-
864
- async def resolve(self, request: Request) -> _Resolve:
865
- if not await self._rule.match(request):
866
- return None, set()
867
- match_info = await self._app.router.resolve(request)
868
- match_info.add_app(self._app)
869
- if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
870
- methods = match_info.http_exception.allowed_methods
871
- else:
872
- methods = set()
873
- return match_info, methods
874
-
875
- def __repr__(self) -> str:
876
- return "<MatchedSubAppResource -> {app!r}>" "".format(app=self._app)
877
-
878
-
879
- class ResourceRoute(AbstractRoute):
880
- """A route with resource"""
881
-
882
- def __init__(
883
- self,
884
- method: str,
885
- handler: Union[Handler, Type[AbstractView]],
886
- resource: AbstractResource,
887
- *,
888
- expect_handler: Optional[_ExpectHandler] = None,
889
- ) -> None:
890
- super().__init__(
891
- method, handler, expect_handler=expect_handler, resource=resource
892
- )
893
-
894
- def __repr__(self) -> str:
895
- return "<ResourceRoute [{method}] {resource} -> {handler!r}".format(
896
- method=self.method, resource=self._resource, handler=self.handler
897
- )
898
-
899
- @property
900
- def name(self) -> Optional[str]:
901
- if self._resource is None:
902
- return None
903
- return self._resource.name
904
-
905
- def url_for(self, *args: str, **kwargs: str) -> URL:
906
- """Construct url for route with additional params."""
907
- assert self._resource is not None
908
- return self._resource.url_for(*args, **kwargs)
909
-
910
- def get_info(self) -> _InfoDict:
911
- assert self._resource is not None
912
- return self._resource.get_info()
913
-
914
-
915
- class SystemRoute(AbstractRoute):
916
- def __init__(self, http_exception: HTTPException) -> None:
917
- super().__init__(hdrs.METH_ANY, self._handle)
918
- self._http_exception = http_exception
919
-
920
- def url_for(self, *args: str, **kwargs: str) -> URL:
921
- raise RuntimeError(".url_for() is not allowed for SystemRoute")
922
-
923
- @property
924
- def name(self) -> Optional[str]:
925
- return None
926
-
927
- def get_info(self) -> _InfoDict:
928
- return {"http_exception": self._http_exception}
929
-
930
- async def _handle(self, request: Request) -> StreamResponse:
931
- raise self._http_exception
932
-
933
- @property
934
- def status(self) -> int:
935
- return self._http_exception.status
936
-
937
- @property
938
- def reason(self) -> str:
939
- return self._http_exception.reason
940
-
941
- def __repr__(self) -> str:
942
- return "<SystemRoute {self.status}: {self.reason}>".format(self=self)
943
-
944
-
945
- class View(AbstractView):
946
- async def _iter(self) -> StreamResponse:
947
- if self.request.method not in hdrs.METH_ALL:
948
- self._raise_allowed_methods()
949
- method: Callable[[], Awaitable[StreamResponse]] = getattr(
950
- self, self.request.method.lower(), None
951
- )
952
- if method is None:
953
- self._raise_allowed_methods()
954
- resp = await method()
955
- return resp
956
-
957
- def __await__(self) -> Generator[Any, None, StreamResponse]:
958
- return self._iter().__await__()
959
-
960
- def _raise_allowed_methods(self) -> None:
961
- allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
962
- raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
963
-
964
-
965
- class ResourcesView(Sized, Iterable[AbstractResource], Container[AbstractResource]):
966
- def __init__(self, resources: List[AbstractResource]) -> None:
967
- self._resources = resources
968
-
969
- def __len__(self) -> int:
970
- return len(self._resources)
971
-
972
- def __iter__(self) -> Iterator[AbstractResource]:
973
- yield from self._resources
974
-
975
- def __contains__(self, resource: object) -> bool:
976
- return resource in self._resources
977
-
978
-
979
- class RoutesView(Sized, Iterable[AbstractRoute], Container[AbstractRoute]):
980
- def __init__(self, resources: List[AbstractResource]):
981
- self._routes: List[AbstractRoute] = []
982
- for resource in resources:
983
- for route in resource:
984
- self._routes.append(route)
985
-
986
- def __len__(self) -> int:
987
- return len(self._routes)
988
-
989
- def __iter__(self) -> Iterator[AbstractRoute]:
990
- yield from self._routes
991
-
992
- def __contains__(self, route: object) -> bool:
993
- return route in self._routes
994
-
995
-
996
- class UrlDispatcher(AbstractRouter, Mapping[str, AbstractResource]):
997
-
998
- NAME_SPLIT_RE = re.compile(r"[.:-]")
999
-
1000
- def __init__(self) -> None:
1001
- super().__init__()
1002
- self._resources: List[AbstractResource] = []
1003
- self._named_resources: Dict[str, AbstractResource] = {}
1004
-
1005
- async def resolve(self, request: Request) -> UrlMappingMatchInfo:
1006
- method = request.method
1007
- allowed_methods: Set[str] = set()
1008
-
1009
- for resource in self._resources:
1010
- match_dict, allowed = await resource.resolve(request)
1011
- if match_dict is not None:
1012
- return match_dict
1013
- else:
1014
- allowed_methods |= allowed
1015
-
1016
- if allowed_methods:
1017
- return MatchInfoError(HTTPMethodNotAllowed(method, allowed_methods))
1018
- else:
1019
- return MatchInfoError(HTTPNotFound())
1020
-
1021
- def __iter__(self) -> Iterator[str]:
1022
- return iter(self._named_resources)
1023
-
1024
- def __len__(self) -> int:
1025
- return len(self._named_resources)
1026
-
1027
- def __contains__(self, resource: object) -> bool:
1028
- return resource in self._named_resources
1029
-
1030
- def __getitem__(self, name: str) -> AbstractResource:
1031
- return self._named_resources[name]
1032
-
1033
- def resources(self) -> ResourcesView:
1034
- return ResourcesView(self._resources)
1035
-
1036
- def routes(self) -> RoutesView:
1037
- return RoutesView(self._resources)
1038
-
1039
- def named_resources(self) -> Mapping[str, AbstractResource]:
1040
- return MappingProxyType(self._named_resources)
1041
-
1042
- def register_resource(self, resource: AbstractResource) -> None:
1043
- assert isinstance(
1044
- resource, AbstractResource
1045
- ), f"Instance of AbstractResource class is required, got {resource!r}"
1046
- if self.frozen:
1047
- raise RuntimeError("Cannot register a resource into frozen router.")
1048
-
1049
- name = resource.name
1050
-
1051
- if name is not None:
1052
- parts = self.NAME_SPLIT_RE.split(name)
1053
- for part in parts:
1054
- if keyword.iskeyword(part):
1055
- raise ValueError(
1056
- f"Incorrect route name {name!r}, "
1057
- "python keywords cannot be used "
1058
- "for route name"
1059
- )
1060
- if not part.isidentifier():
1061
- raise ValueError(
1062
- "Incorrect route name {!r}, "
1063
- "the name should be a sequence of "
1064
- "python identifiers separated "
1065
- "by dash, dot or column".format(name)
1066
- )
1067
- if name in self._named_resources:
1068
- raise ValueError(
1069
- "Duplicate {!r}, "
1070
- "already handled by {!r}".format(name, self._named_resources[name])
1071
- )
1072
- self._named_resources[name] = resource
1073
- self._resources.append(resource)
1074
-
1075
- def add_resource(self, path: str, *, name: Optional[str] = None) -> Resource:
1076
- if path and not path.startswith("/"):
1077
- raise ValueError("path should be started with / or be empty")
1078
- # Reuse last added resource if path and name are the same
1079
- if self._resources:
1080
- resource = self._resources[-1]
1081
- if resource.name == name and resource.raw_match(path):
1082
- return cast(Resource, resource)
1083
- if not ("{" in path or "}" in path or ROUTE_RE.search(path)):
1084
- resource = PlainResource(_requote_path(path), name=name)
1085
- self.register_resource(resource)
1086
- return resource
1087
- resource = DynamicResource(path, name=name)
1088
- self.register_resource(resource)
1089
- return resource
1090
-
1091
- def add_route(
1092
- self,
1093
- method: str,
1094
- path: str,
1095
- handler: Union[Handler, Type[AbstractView]],
1096
- *,
1097
- name: Optional[str] = None,
1098
- expect_handler: Optional[_ExpectHandler] = None,
1099
- ) -> AbstractRoute:
1100
- resource = self.add_resource(path, name=name)
1101
- return resource.add_route(method, handler, expect_handler=expect_handler)
1102
-
1103
- def add_static(
1104
- self,
1105
- prefix: str,
1106
- path: PathLike,
1107
- *,
1108
- name: Optional[str] = None,
1109
- expect_handler: Optional[_ExpectHandler] = None,
1110
- chunk_size: int = 256 * 1024,
1111
- show_index: bool = False,
1112
- follow_symlinks: bool = False,
1113
- append_version: bool = False,
1114
- ) -> AbstractResource:
1115
- """Add static files view.
1116
-
1117
- prefix - url prefix
1118
- path - folder with files
1119
-
1120
- """
1121
- assert prefix.startswith("/")
1122
- if prefix.endswith("/"):
1123
- prefix = prefix[:-1]
1124
- resource = StaticResource(
1125
- prefix,
1126
- path,
1127
- name=name,
1128
- expect_handler=expect_handler,
1129
- chunk_size=chunk_size,
1130
- show_index=show_index,
1131
- follow_symlinks=follow_symlinks,
1132
- append_version=append_version,
1133
- )
1134
- self.register_resource(resource)
1135
- return resource
1136
-
1137
- def add_head(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
1138
- """Shortcut for add_route with method HEAD."""
1139
- return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs)
1140
-
1141
- def add_options(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
1142
- """Shortcut for add_route with method OPTIONS."""
1143
- return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
1144
-
1145
- def add_get(
1146
- self,
1147
- path: str,
1148
- handler: Handler,
1149
- *,
1150
- name: Optional[str] = None,
1151
- allow_head: bool = True,
1152
- **kwargs: Any,
1153
- ) -> AbstractRoute:
1154
- """Shortcut for add_route with method GET.
1155
-
1156
- If allow_head is true, another
1157
- route is added allowing head requests to the same endpoint.
1158
- """
1159
- resource = self.add_resource(path, name=name)
1160
- if allow_head:
1161
- resource.add_route(hdrs.METH_HEAD, handler, **kwargs)
1162
- return resource.add_route(hdrs.METH_GET, handler, **kwargs)
1163
-
1164
- def add_post(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
1165
- """Shortcut for add_route with method POST."""
1166
- return self.add_route(hdrs.METH_POST, path, handler, **kwargs)
1167
-
1168
- def add_put(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
1169
- """Shortcut for add_route with method PUT."""
1170
- return self.add_route(hdrs.METH_PUT, path, handler, **kwargs)
1171
-
1172
- def add_patch(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
1173
- """Shortcut for add_route with method PATCH."""
1174
- return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs)
1175
-
1176
- def add_delete(self, path: str, handler: Handler, **kwargs: Any) -> AbstractRoute:
1177
- """Shortcut for add_route with method DELETE."""
1178
- return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs)
1179
-
1180
- def add_view(
1181
- self, path: str, handler: Type[AbstractView], **kwargs: Any
1182
- ) -> AbstractRoute:
1183
- """Shortcut for add_route with ANY methods for a class-based view."""
1184
- return self.add_route(hdrs.METH_ANY, path, handler, **kwargs)
1185
-
1186
- def freeze(self) -> None:
1187
- super().freeze()
1188
- for resource in self._resources:
1189
- resource.freeze()
1190
-
1191
- def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]:
1192
- """Append routes to route table.
1193
-
1194
- Parameter should be a sequence of RouteDef objects.
1195
-
1196
- Returns a list of registered AbstractRoute instances.
1197
- """
1198
- registered_routes = []
1199
- for route_def in routes:
1200
- registered_routes.extend(route_def.register(self))
1201
- return registered_routes
1202
-
1203
-
1204
- def _quote_path(value: str) -> str:
1205
- if YARL_VERSION < (1, 6):
1206
- value = value.replace("%", "%25")
1207
- return URL.build(path=value, encoded=False).raw_path
1208
-
1209
-
1210
- def _unquote_path(value: str) -> str:
1211
- return URL.build(path=value, encoded=True).path
1212
-
1213
-
1214
- def _requote_path(value: str) -> str:
1215
- # Quote non-ascii characters and other characters which must be quoted,
1216
- # but preserve existing %-sequences.
1217
- result = _quote_path(value)
1218
- if "%" in value:
1219
- result = result.replace("%25", "%")
1220
- return result
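For context on the API removed above: `UrlDispatcher` is aiohttp's default router, normally reached through `web.Application().router` rather than constructed directly. A minimal usage sketch (illustrative only, not part of this commit; the `/greet/{name}` route and `./static` folder are invented examples):

```python
from aiohttp import web

async def greet(request: web.Request) -> web.Response:
    # match_info is the UrlMappingMatchInfo produced by UrlDispatcher.resolve()
    return web.Response(text=f"Hello, {request.match_info['name']}")

app = web.Application()
# add_get() registers a GET route and, by default, a matching HEAD route.
app.router.add_get("/greet/{name}", greet, name="greet")
# add_static() registers a StaticResource under the given URL prefix.
app.router.add_static("/assets", "./static", show_index=False)

# Named resources support reverse URL construction via url_for().
url = app.router["greet"].url_for(name="world")  # URL('/greet/world')
```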
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/params.py DELETED
@@ -1,760 +0,0 @@
-import warnings
-from enum import Enum
-from typing import Any, Callable, Dict, List, Optional, Sequence, Union
-
-from pydantic.fields import FieldInfo
-from typing_extensions import Annotated, deprecated
-
-from ._compat import PYDANTIC_V2, Undefined
-
-_Unset: Any = Undefined
-
-
-class ParamTypes(Enum):
-    query = "query"
-    header = "header"
-    path = "path"
-    cookie = "cookie"
-
-
-class Param(FieldInfo):
-    in_: ParamTypes
-
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        self.deprecated = deprecated
-        if example is not _Unset:
-            warnings.warn(
-                "`example` has been deprecated, please use `examples` instead",
-                category=DeprecationWarning,
-                stacklevel=4,
-            )
-        self.example = example
-        self.include_in_schema = include_in_schema
-        kwargs = dict(
-            default=default,
-            default_factory=default_factory,
-            alias=alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            discriminator=discriminator,
-            multiple_of=multiple_of,
-            allow_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            **extra,
-        )
-        if examples is not None:
-            kwargs["examples"] = examples
-        if regex is not None:
-            warnings.warn(
-                "`regex` has been deprecated, please use `pattern` instead",
-                category=DeprecationWarning,
-                stacklevel=4,
-            )
-        current_json_schema_extra = json_schema_extra or extra
-        if PYDANTIC_V2:
-            kwargs.update(
-                {
-                    "annotation": annotation,
-                    "alias_priority": alias_priority,
-                    "validation_alias": validation_alias,
-                    "serialization_alias": serialization_alias,
-                    "strict": strict,
-                    "json_schema_extra": current_json_schema_extra,
-                }
-            )
-            kwargs["pattern"] = pattern or regex
-        else:
-            kwargs["regex"] = pattern or regex
-            kwargs.update(**current_json_schema_extra)
-        use_kwargs = {k: v for k, v in kwargs.items() if v is not _Unset}
-
-        super().__init__(**use_kwargs)
-
-    def __repr__(self) -> str:
-        return f"{self.__class__.__name__}({self.default})"
-
-
-class Path(Param):
-    in_ = ParamTypes.path
-
-    def __init__(
-        self,
-        default: Any = ...,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        assert default is ..., "Path parameters cannot have a default value"
-        self.in_ = self.in_
-        super().__init__(
-            default=default,
-            default_factory=default_factory,
-            annotation=annotation,
-            alias=alias,
-            alias_priority=alias_priority,
-            validation_alias=validation_alias,
-            serialization_alias=serialization_alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            pattern=pattern,
-            regex=regex,
-            discriminator=discriminator,
-            strict=strict,
-            multiple_of=multiple_of,
-            allow_inf_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            deprecated=deprecated,
-            example=example,
-            examples=examples,
-            include_in_schema=include_in_schema,
-            json_schema_extra=json_schema_extra,
-            **extra,
-        )
-
-
-class Query(Param):
-    in_ = ParamTypes.query
-
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        super().__init__(
-            default=default,
-            default_factory=default_factory,
-            annotation=annotation,
-            alias=alias,
-            alias_priority=alias_priority,
-            validation_alias=validation_alias,
-            serialization_alias=serialization_alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            pattern=pattern,
-            regex=regex,
-            discriminator=discriminator,
-            strict=strict,
-            multiple_of=multiple_of,
-            allow_inf_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            deprecated=deprecated,
-            example=example,
-            examples=examples,
-            include_in_schema=include_in_schema,
-            json_schema_extra=json_schema_extra,
-            **extra,
-        )
-
-
-class Header(Param):
-    in_ = ParamTypes.header
-
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        convert_underscores: bool = True,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        self.convert_underscores = convert_underscores
-        super().__init__(
-            default=default,
-            default_factory=default_factory,
-            annotation=annotation,
-            alias=alias,
-            alias_priority=alias_priority,
-            validation_alias=validation_alias,
-            serialization_alias=serialization_alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            pattern=pattern,
-            regex=regex,
-            discriminator=discriminator,
-            strict=strict,
-            multiple_of=multiple_of,
-            allow_inf_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            deprecated=deprecated,
-            example=example,
-            examples=examples,
-            include_in_schema=include_in_schema,
-            json_schema_extra=json_schema_extra,
-            **extra,
-        )
-
-
-class Cookie(Param):
-    in_ = ParamTypes.cookie
-
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        super().__init__(
-            default=default,
-            default_factory=default_factory,
-            annotation=annotation,
-            alias=alias,
-            alias_priority=alias_priority,
-            validation_alias=validation_alias,
-            serialization_alias=serialization_alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            pattern=pattern,
-            regex=regex,
-            discriminator=discriminator,
-            strict=strict,
-            multiple_of=multiple_of,
-            allow_inf_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            deprecated=deprecated,
-            example=example,
-            examples=examples,
-            include_in_schema=include_in_schema,
-            json_schema_extra=json_schema_extra,
-            **extra,
-        )
-
-
-class Body(FieldInfo):
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        embed: bool = False,
-        media_type: str = "application/json",
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        self.embed = embed
-        self.media_type = media_type
-        self.deprecated = deprecated
-        if example is not _Unset:
-            warnings.warn(
-                "`example` has been deprecated, please use `examples` instead",
-                category=DeprecationWarning,
-                stacklevel=4,
-            )
-        self.example = example
-        self.include_in_schema = include_in_schema
-        kwargs = dict(
-            default=default,
-            default_factory=default_factory,
-            alias=alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            discriminator=discriminator,
-            multiple_of=multiple_of,
-            allow_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            **extra,
-        )
-        if examples is not None:
-            kwargs["examples"] = examples
-        if regex is not None:
-            warnings.warn(
-                "`regex` has been deprecated, please use `pattern` instead",
-                category=DeprecationWarning,
-                stacklevel=4,
-            )
-        current_json_schema_extra = json_schema_extra or extra
-        if PYDANTIC_V2:
-            kwargs.update(
-                {
-                    "annotation": annotation,
-                    "alias_priority": alias_priority,
-                    "validation_alias": validation_alias,
-                    "serialization_alias": serialization_alias,
-                    "strict": strict,
-                    "json_schema_extra": current_json_schema_extra,
-                }
-            )
-            kwargs["pattern"] = pattern or regex
-        else:
-            kwargs["regex"] = pattern or regex
-            kwargs.update(**current_json_schema_extra)
-
-        use_kwargs = {k: v for k, v in kwargs.items() if v is not _Unset}
-
-        super().__init__(**use_kwargs)
-
-    def __repr__(self) -> str:
-        return f"{self.__class__.__name__}({self.default})"
-
-
-class Form(Body):
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        media_type: str = "application/x-www-form-urlencoded",
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        super().__init__(
-            default=default,
-            default_factory=default_factory,
-            annotation=annotation,
-            embed=True,
-            media_type=media_type,
-            alias=alias,
-            alias_priority=alias_priority,
-            validation_alias=validation_alias,
-            serialization_alias=serialization_alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            pattern=pattern,
-            regex=regex,
-            discriminator=discriminator,
-            strict=strict,
-            multiple_of=multiple_of,
-            allow_inf_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            deprecated=deprecated,
-            example=example,
-            examples=examples,
-            include_in_schema=include_in_schema,
-            json_schema_extra=json_schema_extra,
-            **extra,
-        )
-
-
-class File(Form):
-    def __init__(
-        self,
-        default: Any = Undefined,
-        *,
-        default_factory: Union[Callable[[], Any], None] = _Unset,
-        annotation: Optional[Any] = None,
-        media_type: str = "multipart/form-data",
-        alias: Optional[str] = None,
-        alias_priority: Union[int, None] = _Unset,
-        # TODO: update when deprecating Pydantic v1, import these types
-        # validation_alias: str | AliasPath | AliasChoices | None
-        validation_alias: Union[str, None] = None,
-        serialization_alias: Union[str, None] = None,
-        title: Optional[str] = None,
-        description: Optional[str] = None,
-        gt: Optional[float] = None,
-        ge: Optional[float] = None,
-        lt: Optional[float] = None,
-        le: Optional[float] = None,
-        min_length: Optional[int] = None,
-        max_length: Optional[int] = None,
-        pattern: Optional[str] = None,
-        regex: Annotated[
-            Optional[str],
-            deprecated(
-                "Deprecated in FastAPI 0.100.0 and Pydantic v2, use `pattern` instead."
-            ),
-        ] = None,
-        discriminator: Union[str, None] = None,
-        strict: Union[bool, None] = _Unset,
-        multiple_of: Union[float, None] = _Unset,
-        allow_inf_nan: Union[bool, None] = _Unset,
-        max_digits: Union[int, None] = _Unset,
-        decimal_places: Union[int, None] = _Unset,
-        examples: Optional[List[Any]] = None,
-        example: Annotated[
-            Optional[Any],
-            deprecated(
-                "Deprecated in OpenAPI 3.1.0 that now uses JSON Schema 2020-12, "
-                "although still supported. Use examples instead."
-            ),
-        ] = _Unset,
-        deprecated: Optional[bool] = None,
-        include_in_schema: bool = True,
-        json_schema_extra: Union[Dict[str, Any], None] = None,
-        **extra: Any,
-    ):
-        super().__init__(
-            default=default,
-            default_factory=default_factory,
-            annotation=annotation,
-            media_type=media_type,
-            alias=alias,
-            alias_priority=alias_priority,
-            validation_alias=validation_alias,
-            serialization_alias=serialization_alias,
-            title=title,
-            description=description,
-            gt=gt,
-            ge=ge,
-            lt=lt,
-            le=le,
-            min_length=min_length,
-            max_length=max_length,
-            pattern=pattern,
-            regex=regex,
-            discriminator=discriminator,
-            strict=strict,
-            multiple_of=multiple_of,
-            allow_inf_nan=allow_inf_nan,
-            max_digits=max_digits,
-            decimal_places=decimal_places,
-            deprecated=deprecated,
-            example=example,
-            examples=examples,
-            include_in_schema=include_in_schema,
-            json_schema_extra=json_schema_extra,
-            **extra,
-        )
-
-
-class Depends:
-    def __init__(
-        self, dependency: Optional[Callable[..., Any]] = None, *, use_cache: bool = True
-    ):
-        self.dependency = dependency
-        self.use_cache = use_cache
-
-    def __repr__(self) -> str:
-        attr = getattr(self.dependency, "__name__", type(self.dependency).__name__)
-        cache = "" if self.use_cache else ", use_cache=False"
-        return f"{self.__class__.__name__}({attr}{cache})"
-
-
-class Security(Depends):
-    def __init__(
-        self,
-        dependency: Optional[Callable[..., Any]] = None,
-        *,
-        scopes: Optional[Sequence[str]] = None,
-        use_cache: bool = True,
-    ):
-        super().__init__(dependency=dependency, use_cache=use_cache)
-        self.scopes = scopes or []
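The classes deleted above back FastAPI's parameter declaration helpers. A rough usage sketch (illustrative, not part of the commit; the `/items/{item_id}` endpoint is invented), assuming FastAPI >= 0.100 where `pattern` replaces the deprecated `regex` as noted in the deprecation messages above:

```python
from typing import Optional

from fastapi import FastAPI, Path, Query

app = FastAPI()

@app.get("/items/{item_id}")
async def read_item(
    # Path() forbids defaults: note the `assert default is ...` in Path.__init__.
    item_id: int = Path(..., ge=1, description="Item identifier"),
    # Query() constraints are forwarded to pydantic's FieldInfo validation.
    q: Optional[str] = Query(None, max_length=50, pattern="^fixedquery$"),
):
    return {"item_id": item_id, "q": q}
```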
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I_V_.py DELETED
@@ -1,20 +0,0 @@
-from fontTools.misc.textTools import strjoin, tobytes, tostr
-from . import asciiTable
-
-
-class table_T_S_I_V_(asciiTable.asciiTable):
-    def toXML(self, writer, ttFont):
-        data = tostr(self.data)
-        # removing null bytes. XXX needed??
-        data = data.split("\0")
-        data = strjoin(data)
-        writer.begintag("source")
-        writer.newline()
-        writer.write_noindent(data.replace("\r", "\n"))
-        writer.newline()
-        writer.endtag("source")
-        writer.newline()
-
-    def fromXML(self, name, attrs, content, ttFont):
-        lines = strjoin(content).split("\n")
-        self.data = tobytes("\r".join(lines[1:-1]))
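`TSIV` is one of the TSI tables that carry Visual TrueType (VTT) source data; the class above only round-trips it through TTX XML. A small reading sketch (illustrative; `hinted.ttf` is a placeholder path, and the table exists only in VTT-hinted fonts):

```python
from fontTools.ttLib import TTFont

font = TTFont("hinted.ttf")  # placeholder path
if "TSIV" in font:
    tsiv = font["TSIV"]  # an instance of table_T_S_I_V_
    # .data holds the raw source bytes; decode for display
    print(tsiv.data.decode("utf-8", errors="replace"))
```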
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/ftp.py DELETED
@@ -1,380 +0,0 @@
-import os
-import sys
-import uuid
-import warnings
-from ftplib import FTP, Error, error_perm
-from typing import Any
-
-from ..spec import AbstractBufferedFile, AbstractFileSystem
-from ..utils import infer_storage_options, isfilelike
-
-
-class FTPFileSystem(AbstractFileSystem):
-    """A filesystem over classic FTP"""
-
-    root_marker = "/"
-    cachable = False
-    protocol = "ftp"
-
-    def __init__(
-        self,
-        host,
-        port=21,
-        username=None,
-        password=None,
-        acct=None,
-        block_size=None,
-        tempdir=None,
-        timeout=30,
-        encoding="utf-8",
-        **kwargs,
-    ):
-        """
-        You can use _get_kwargs_from_urls to get some kwargs from
-        a reasonable FTP url.
-
-        Authentication will be anonymous if username/password are not
-        given.
-
-        Parameters
-        ----------
-        host: str
-            The remote server name/ip to connect to
-        port: int
-            Port to connect with
-        username: str or None
-            If authenticating, the user's identifier
-        password: str or None
-            User's password on the server, if using
-        acct: str or None
-            Some servers also need an "account" string for auth
-        block_size: int or None
-            If given, the read-ahead or write buffer size.
-        tempdir: str
-            Directory on remote to put temporary files when in a transaction
-        timeout: int
-            Timeout of the ftp connection in seconds
-        encoding: str
-            Encoding to use for directories and filenames in FTP connection
-        """
-        super(FTPFileSystem, self).__init__(**kwargs)
-        self.host = host
-        self.port = port
-        self.tempdir = tempdir or "/tmp"
-        self.cred = username, password, acct
-        self.timeout = timeout
-        self.encoding = encoding
-        if block_size is not None:
-            self.blocksize = block_size
-        else:
-            self.blocksize = 2**16
-        self._connect()
-
-    def _connect(self):
-        if sys.version_info >= (3, 9):
-            self.ftp = FTP(timeout=self.timeout, encoding=self.encoding)
-        elif self.encoding:
-            warnings.warn("`encoding` not supported for python<3.9, ignoring")
-            self.ftp = FTP(timeout=self.timeout)
-        else:
-            self.ftp = FTP(timeout=self.timeout)
-        self.ftp.connect(self.host, self.port)
-        self.ftp.login(*self.cred)
-
-    @classmethod
-    def _strip_protocol(cls, path):
-        return "/" + infer_storage_options(path)["path"].lstrip("/").rstrip("/")
-
-    @staticmethod
-    def _get_kwargs_from_urls(urlpath):
-        out = infer_storage_options(urlpath)
-        out.pop("path", None)
-        out.pop("protocol", None)
-        return out
-
-    def ls(self, path, detail=True, **kwargs):
-        path = self._strip_protocol(path)
-        out = []
-        if path not in self.dircache:
-            try:
-                try:
-                    out = [
-                        (fn, details)
-                        for (fn, details) in self.ftp.mlsd(path)
-                        if fn not in [".", ".."]
-                        and details["type"] not in ["pdir", "cdir"]
-                    ]
-                except error_perm:
-                    out = _mlsd2(self.ftp, path)  # Not platform independent
-                for fn, details in out:
-                    if path == "/":
-                        path = ""  # just for forming the names, below
-                    details["name"] = "/".join([path, fn.lstrip("/")])
-                    if details["type"] == "file":
-                        details["size"] = int(details["size"])
-                    else:
-                        details["size"] = 0
-                    if details["type"] == "dir":
-                        details["type"] = "directory"
-                self.dircache[path] = out
-            except Error:
-                try:
-                    info = self.info(path)
-                    if info["type"] == "file":
-                        out = [(path, info)]
-                except (Error, IndexError):
-                    raise FileNotFoundError(path)
-        files = self.dircache.get(path, out)
-        if not detail:
-            return sorted([fn for fn, details in files])
-        return [details for fn, details in files]
-
-    def info(self, path, **kwargs):
-        # implement with direct method
-        path = self._strip_protocol(path)
-        if path == "/":
-            # special case, since this dir has no real entry
-            return {"name": "/", "size": 0, "type": "directory"}
-        files = self.ls(self._parent(path).lstrip("/"), True)
-        try:
-            out = [f for f in files if f["name"] == path][0]
-        except IndexError:
-            raise FileNotFoundError(path)
-        return out
-
-    def get_file(self, rpath, lpath, **kwargs):
-        if self.isdir(rpath):
-            if not os.path.exists(lpath):
-                os.mkdir(lpath)
-            return
-        if isfilelike(lpath):
-            outfile = lpath
-        else:
-            outfile = open(lpath, "wb")
-
-        def cb(x):
-            outfile.write(x)
-
-        self.ftp.retrbinary(
-            "RETR %s" % rpath,
-            blocksize=self.blocksize,
-            callback=cb,
-        )
-        if not isfilelike(lpath):
-            outfile.close()
-
-    def cat_file(self, path, start=None, end=None, **kwargs):
-        if end is not None:
-            return super().cat_file(path, start, end, **kwargs)
-        out = []
-
-        def cb(x):
-            out.append(x)
-
-        self.ftp.retrbinary(
-            "RETR %s" % path,
-            blocksize=self.blocksize,
-            rest=start,
-            callback=cb,
-        )
-        return b"".join(out)
-
-    def _open(
-        self,
-        path,
-        mode="rb",
-        block_size=None,
-        cache_options=None,
-        autocommit=True,
-        **kwargs,
-    ):
-        path = self._strip_protocol(path)
-        block_size = block_size or self.blocksize
-        return FTPFile(
-            self,
-            path,
-            mode=mode,
-            block_size=block_size,
-            tempdir=self.tempdir,
-            autocommit=autocommit,
-            cache_options=cache_options,
-        )
-
-    def _rm(self, path):
-        path = self._strip_protocol(path)
-        self.ftp.delete(path)
-        self.invalidate_cache(self._parent(path))
-
-    def rm(self, path, recursive=False, maxdepth=None):
-        paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
-        for p in reversed(paths):
-            if self.isfile(p):
-                self.rm_file(p)
-            else:
-                self.rmdir(p)
-
-    def mkdir(self, path: str, create_parents: bool = True, **kwargs: Any) -> None:
-        path = self._strip_protocol(path)
-        parent = self._parent(path)
-        if parent != self.root_marker and not self.exists(parent) and create_parents:
-            self.mkdir(parent, create_parents=create_parents)
-
-        self.ftp.mkd(path)
-        self.invalidate_cache(self._parent(path))
-
-    def makedirs(self, path: str, exist_ok: bool = False) -> None:
-        path = self._strip_protocol(path)
-        if self.exists(path):
-            # NB: "/" does not "exist" as it has no directory entry
-            if not exist_ok:
-                raise FileExistsError(f"{path} exists without `exist_ok`")
-            # exists_ok=True -> no-op
-        else:
-            self.mkdir(path, create_parents=True)
-
-    def rmdir(self, path):
-        path = self._strip_protocol(path)
-        self.ftp.rmd(path)
-        self.invalidate_cache(self._parent(path))
-
-    def mv(self, path1, path2, **kwargs):
-        path1 = self._strip_protocol(path1)
-        path2 = self._strip_protocol(path2)
-        self.ftp.rename(path1, path2)
-        self.invalidate_cache(self._parent(path1))
-        self.invalidate_cache(self._parent(path2))
-
-    def __del__(self):
-        self.ftp.close()
-
-    def invalidate_cache(self, path=None):
-        if path is None:
-            self.dircache.clear()
-        else:
-            self.dircache.pop(path, None)
-        super(FTPFileSystem, self).invalidate_cache(path)
-
-
-class TransferDone(Exception):
-    """Internal exception to break out of transfer"""
-
-    pass
-
-
-class FTPFile(AbstractBufferedFile):
-    """Interact with a remote FTP file with read/write buffering"""
-
-    def __init__(
-        self,
-        fs,
-        path,
-        mode="rb",
-        block_size="default",
-        autocommit=True,
-        cache_type="readahead",
-        cache_options=None,
-        **kwargs,
-    ):
-        super().__init__(
-            fs,
-            path,
-            mode=mode,
-            block_size=block_size,
-            autocommit=autocommit,
-            cache_type=cache_type,
-            cache_options=cache_options,
-            **kwargs,
-        )
-        if not autocommit:
-            self.target = self.path
-            self.path = "/".join([kwargs["tempdir"], str(uuid.uuid4())])
-
-    def commit(self):
-        self.fs.mv(self.path, self.target)
-
-    def discard(self):
-        self.fs.rm(self.path)
-
-    def _fetch_range(self, start, end):
-        """Get bytes between given byte limits
-
-        Implemented by raising an exception in the fetch callback when the
-        number of bytes received reaches the requested amount.
-
-        Will fail if the server does not respect the REST command on
-        retrieve requests.
-        """
-        out = []
-        total = [0]
-
-        def callback(x):
-            total[0] += len(x)
-            if total[0] > end - start:
-                out.append(x[: (end - start) - total[0]])
-                if end < self.size:
-                    raise TransferDone
-            else:
-                out.append(x)
-
-            if total[0] == end - start and end < self.size:
-                raise TransferDone
-
-        try:
-            self.fs.ftp.retrbinary(
-                "RETR %s" % self.path,
-                blocksize=self.blocksize,
-                rest=start,
-                callback=callback,
-            )
-        except TransferDone:
-            try:
-                # stop transfer, we got enough bytes for this block
-                self.fs.ftp.abort()
-                self.fs.ftp.getmultiline()
-            except Error:
-                self.fs._connect()
-
-        return b"".join(out)
-
-    def _upload_chunk(self, final=False):
-        self.buffer.seek(0)
-        self.fs.ftp.storbinary(
-            "STOR " + self.path, self.buffer, blocksize=self.blocksize, rest=self.offset
-        )
-        return True
-
-
-def _mlsd2(ftp, path="."):
-    """
-    Fall back to using `dir` instead of `mlsd` if not supported.
-
-    This parses a Linux style `ls -l` response to `dir`, but the response may
-    be platform dependent.
-
-    Parameters
-    ----------
-    ftp: ftplib.FTP
-    path: str
-        Expects to be given a path, but defaults to ".".
-    """
-    lines = []
-    minfo = []
-    ftp.dir(path, lines.append)
-    for line in lines:
-        line = line.split()
-        this = (
-            line[-1],
-            {
-                "modify": " ".join(line[5:8]),
-                "unix.owner": line[2],
-                "unix.group": line[3],
-                "unix.mode": line[0],
-                "size": line[4],
-            },
-        )
-        if "d" == this[1]["unix.mode"][0]:
-            this[1]["type"] = "dir"
-        else:
-            this[1]["type"] = "file"
-        minfo.append(this)
-    return minfo
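`FTPFileSystem` registers itself under the `ftp` protocol, so it is normally obtained through fsspec's factory rather than constructed directly. A usage sketch (illustrative; the host and paths are placeholders):

```python
import fsspec

# Anonymous login unless username/password are given (see __init__ above).
fs = fsspec.filesystem("ftp", host="ftp.example.com", port=21)

print(fs.ls("/pub", detail=False))  # listing via MLSD, with a `dir` fallback

with fs.open("/pub/readme.txt", "rb") as f:  # returns an FTPFile
    head = f.read(1024)  # ranged read served by _fetch_range via REST + RETR
```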
spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/resnet_vd.py DELETED
@@ -1,368 +0,0 @@
-# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-import paddle.nn as nn
-import paddle.nn.functional as F
-
-from paddleseg.cvlibs import manager
-from paddleseg.models import layers
-from paddleseg.utils import utils
-
-__all__ = [
-    "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd"
-]
-
-
-class ConvBNLayer(nn.Layer):
-    def __init__(
-            self,
-            in_channels,
-            out_channels,
-            kernel_size,
-            stride=1,
-            dilation=1,
-            groups=1,
-            is_vd_mode=False,
-            act=None,
-    ):
-        super(ConvBNLayer, self).__init__()
-
-        self.is_vd_mode = is_vd_mode
-        self._pool2d_avg = nn.AvgPool2D(
-            kernel_size=2, stride=2, padding=0, ceil_mode=True)
-        self._conv = nn.Conv2D(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=(kernel_size - 1) // 2 if dilation == 1 else 0,
-            dilation=dilation,
-            groups=groups,
-            bias_attr=False)
-
-        self._batch_norm = layers.SyncBatchNorm(out_channels)
-        self._act_op = layers.Activation(act=act)
-
-    def forward(self, inputs):
-        if self.is_vd_mode:
-            inputs = self._pool2d_avg(inputs)
-        y = self._conv(inputs)
-        y = self._batch_norm(y)
-        y = self._act_op(y)
-
-        return y
-
-
-class BottleneckBlock(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 stride,
-                 shortcut=True,
-                 if_first=False,
-                 dilation=1):
-        super(BottleneckBlock, self).__init__()
-
-        self.conv0 = ConvBNLayer(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=1,
-            act='relu')
-
-        self.dilation = dilation
-
-        self.conv1 = ConvBNLayer(
-            in_channels=out_channels,
-            out_channels=out_channels,
-            kernel_size=3,
-            stride=stride,
-            act='relu',
-            dilation=dilation)
-        self.conv2 = ConvBNLayer(
-            in_channels=out_channels,
-            out_channels=out_channels * 4,
-            kernel_size=1,
-            act=None)
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                in_channels=in_channels,
-                out_channels=out_channels * 4,
-                kernel_size=1,
-                stride=1,
-                is_vd_mode=False if if_first or stride == 1 else True)
-
-        self.shortcut = shortcut
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-
-        ####################################################################
-        # If given dilation rate > 1, using corresponding padding.
-        # The performance drops down without the following padding.
-        if self.dilation > 1:
-            padding = self.dilation
-            y = F.pad(y, [padding, padding, padding, padding])
-        #####################################################################
-
-        conv1 = self.conv1(y)
-        conv2 = self.conv2(conv1)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-
-        y = paddle.add(x=short, y=conv2)
-        y = F.relu(y)
-        return y
-
-
-class BasicBlock(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 stride,
-                 shortcut=True,
-                 if_first=False):
-        super(BasicBlock, self).__init__()
-        self.stride = stride
-        self.conv0 = ConvBNLayer(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=3,
-            stride=stride,
-            act='relu')
-        self.conv1 = ConvBNLayer(
-            in_channels=out_channels,
-            out_channels=out_channels,
-            kernel_size=3,
-            act=None)
-
-        if not shortcut:
-            self.short = ConvBNLayer(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=1,
-                stride=1,
-                is_vd_mode=False if if_first else True)
-
-        self.shortcut = shortcut
-
-    def forward(self, inputs):
-        y = self.conv0(inputs)
-        conv1 = self.conv1(y)
-
-        if self.shortcut:
-            short = inputs
-        else:
-            short = self.short(inputs)
-        y = paddle.add(x=short, y=conv1)
-        y = F.relu(y)
-
-        return y
-
-
-class ResNet_vd(nn.Layer):
-    """
-    The ResNet_vd implementation based on PaddlePaddle.
-
-    The original article refers to Tong He, et al. "Bag of Tricks for Image Classification with Convolutional Neural Networks"
-    (https://arxiv.org/pdf/1812.01187.pdf).
-
-    Args:
-        layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50.
-        output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8.
-        multi_grid (tuple|list, optional): The grid of stage4. Default: (1, 1, 1).
-        pretrained (str, optional): The path of pretrained model.
-
-    """
-
-    def __init__(self,
-                 input_channels=3,
-                 layers=50,
-                 output_stride=32,
-                 multi_grid=(1, 1, 1),
-                 pretrained=None):
-        super(ResNet_vd, self).__init__()
-
-        self.conv1_logit = None  # for gscnn shape stream
-        self.layers = layers
-        supported_layers = [18, 34, 50, 101, 152, 200]
-        assert layers in supported_layers, \
-            "supported layers are {} but input layer is {}".format(
-                supported_layers, layers)
-
-        if layers == 18:
-            depth = [2, 2, 2, 2]
-        elif layers == 34 or layers == 50:
-            depth = [3, 4, 6, 3]
-        elif layers == 101:
-            depth = [3, 4, 23, 3]
-        elif layers == 152:
-            depth = [3, 8, 36, 3]
-        elif layers == 200:
-            depth = [3, 12, 48, 3]
-        num_channels = [64, 256, 512, 1024] if layers >= 50 else [64, 64, 128, 256]
-        num_filters = [64, 128, 256, 512]
-
-        # for channels of four returned stages
-        self.feat_channels = [c * 4 for c in num_filters] if layers >= 50 else num_filters
-        self.feat_channels = [64] + self.feat_channels
-
-        dilation_dict = None
-        if output_stride == 8:
-            dilation_dict = {2: 2, 3: 4}
-        elif output_stride == 16:
-            dilation_dict = {3: 2}
-
-        self.conv1_1 = ConvBNLayer(
-            in_channels=input_channels,
-            out_channels=32,
-            kernel_size=3,
-            stride=2,
-            act='relu')
-        self.conv1_2 = ConvBNLayer(
-            in_channels=32,
-            out_channels=32,
-            kernel_size=3,
-            stride=1,
-            act='relu')
-        self.conv1_3 = ConvBNLayer(
-            in_channels=32,
-            out_channels=64,
-            kernel_size=3,
-            stride=1,
-            act='relu')
-        self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
-
-        # self.block_list = []
-        self.stage_list = []
-        if layers >= 50:
-            for block in range(len(depth)):
-                shortcut = False
-                block_list = []
-                for i in range(depth[block]):
-                    if layers in [101, 152] and block == 2:
-                        if i == 0:
-                            conv_name = "res" + str(block + 2) + "a"
-                        else:
-                            conv_name = "res" + str(block + 2) + "b" + str(i)
-                    else:
-                        conv_name = "res" + str(block + 2) + chr(97 + i)
-
-                    ###############################################################################
-                    # Add dilation rate for some segmentation tasks, if dilation_dict is not None.
-                    dilation_rate = dilation_dict[
-                        block] if dilation_dict and block in dilation_dict else 1
-
-                    # Actually block here is 'stage', and i is 'block' in 'stage'
-                    # At stage 4, expand the dilation_rate if given multi_grid
-                    if block == 3:
-                        dilation_rate = dilation_rate * multi_grid[i]
-                    ###############################################################################
-
-                    bottleneck_block = self.add_sublayer(
-                        'bb_%d_%d' % (block, i),
-                        BottleneckBlock(
-                            in_channels=num_channels[block]
-                            if i == 0 else num_filters[block] * 4,
-                            out_channels=num_filters[block],
-                            stride=2 if i == 0 and block != 0
-                            and dilation_rate == 1 else 1,
-                            shortcut=shortcut,
-                            if_first=block == i == 0,
-                            dilation=dilation_rate))
-
-                    block_list.append(bottleneck_block)
-                    shortcut = True
-                self.stage_list.append(block_list)
-        else:
-            for block in range(len(depth)):
-                shortcut = False
-                block_list = []
-                for i in range(depth[block]):
-                    conv_name = "res" + str(block + 2) + chr(97 + i)
-                    basic_block = self.add_sublayer(
-                        'bb_%d_%d' % (block, i),
-                        BasicBlock(
-                            in_channels=num_channels[block]
-                            if i == 0 else num_filters[block],
-                            out_channels=num_filters[block],
-                            stride=2 if i == 0 and block != 0 else 1,
-                            shortcut=shortcut,
-                            if_first=block == i == 0))
-                    block_list.append(basic_block)
-                    shortcut = True
-                self.stage_list.append(block_list)
-
-        self.pretrained = pretrained
-        self.init_weight()
-
-    def forward(self, inputs):
-        feat_list = []
-        y = self.conv1_1(inputs)
-        y = self.conv1_2(y)
-        y = self.conv1_3(y)
-        feat_list.append(y)
-
-        y = self.pool2d_max(y)
-
-        # A feature list saves the output feature map of each stage.
-        for stage in self.stage_list:
-            for block in stage:
-                y = block(y)
-            feat_list.append(y)
-
-        return feat_list
-
-    def init_weight(self):
-        utils.load_pretrained_model(self, self.pretrained)
-
-
-@manager.BACKBONES.add_component
-def ResNet18_vd(**args):
-    model = ResNet_vd(layers=18, **args)
-    return model
-
-
-def ResNet34_vd(**args):
-    model = ResNet_vd(layers=34, **args)
-    return model
-
-
-@manager.BACKBONES.add_component
-def ResNet50_vd(**args):
-    model = ResNet_vd(layers=50, **args)
-    return model
-
-
-@manager.BACKBONES.add_component
-def ResNet101_vd(**args):
-    model = ResNet_vd(layers=101, **args)
-    return model
-
-
-def ResNet152_vd(**args):
-    model = ResNet_vd(layers=152, **args)
-    return model
-
-
-def ResNet200_vd(**args):
-    model = ResNet_vd(layers=200, **args)
-    return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
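Note (not part of the diff): a minimal usage sketch for the backbone deleted above, assuming Paddle is installed and that the deleted module is importable as `resnet_vd`; the import path is a placeholder.

    import paddle
    from resnet_vd import ResNet50_vd  # hypothetical path to the module above

    backbone = ResNet50_vd(output_stride=8, multi_grid=(1, 2, 4))
    x = paddle.randn([1, 3, 512, 512])
    feats = backbone(x)            # 5 feature maps: conv stem plus one per stage
    print(backbone.feat_channels)  # [64, 256, 512, 1024, 2048] for layers >= 50
    # With output_stride=8, dilation_dict = {2: 2, 3: 4}: the last two stages
    # keep stride 1 and dilate instead, so the deepest map stays at 1/8 input size.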
spaces/Detomo/ai-comic-generation/src/components/ui/switch.tsx DELETED
@@ -1,29 +0,0 @@
-"use client"
-
-import * as React from "react"
-import * as SwitchPrimitives from "@radix-ui/react-switch"
-
-import { cn } from "@/lib/utils"
-
-const Switch = React.forwardRef<
-  React.ElementRef<typeof SwitchPrimitives.Root>,
-  React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>
->(({ className, ...props }, ref) => (
-  <SwitchPrimitives.Root
-    className={cn(
-      "peer inline-flex h-[24px] w-[44px] shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-stone-400 focus-visible:ring-offset-2 focus-visible:ring-offset-white disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-stone-900 data-[state=unchecked]:bg-stone-200 dark:focus-visible:ring-stone-800 dark:focus-visible:ring-offset-stone-950 dark:data-[state=checked]:bg-stone-50 dark:data-[state=unchecked]:bg-stone-800",
-      className
-    )}
-    {...props}
-    ref={ref}
-  >
-    <SwitchPrimitives.Thumb
-      className={cn(
-        "pointer-events-none block h-5 w-5 rounded-full bg-white shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-5 data-[state=unchecked]:translate-x-0 dark:bg-stone-950"
-      )}
-    />
-  </SwitchPrimitives.Root>
-))
-Switch.displayName = SwitchPrimitives.Root.displayName
-
-export { Switch }
spaces/Dinoking/Guccio-AI-Designer/netdissect/statedict.py DELETED
@@ -1,100 +0,0 @@
-'''
-Utilities for dealing with simple state dicts as npz files instead of pth files.
-'''
-
-import torch
-from collections.abc import MutableMapping, Mapping
-
-def load_from_numpy_dict(model, numpy_dict, prefix='', examples=None):
-    '''
-    Loads a model from numpy_dict using load_state_dict.
-    Converts numpy types to torch types using the current state_dict
-    of the model to determine types and devices for the tensors.
-    Supports loading a subdict by prepending the given prefix to all keys.
-    '''
-    if prefix:
-        if not prefix.endswith('.'):
-            prefix = prefix + '.'
-        numpy_dict = PrefixSubDict(numpy_dict, prefix)
-    if examples is None:
-        examples = model.state_dict()
-    torch_state_dict = TorchTypeMatchingDict(numpy_dict, examples)
-    model.load_state_dict(torch_state_dict)
-
-def save_to_numpy_dict(model, numpy_dict, prefix=''):
-    '''
-    Saves a model by copying tensors to numpy_dict.
-    Converts torch types to numpy types using `t.detach().cpu().numpy()`.
-    Supports saving a subdict by prepending the given prefix to all keys.
-    '''
-    if prefix:
-        if not prefix.endswith('.'):
-            prefix = prefix + '.'
-    for k, v in model.state_dict().items():
-        if isinstance(v, torch.Tensor):
-            v = v.detach().cpu().numpy()
-        numpy_dict[prefix + k] = v
-
-class TorchTypeMatchingDict(Mapping):
-    '''
-    Provides a view of a dict of numpy values as torch tensors, where the
-    types are converted to match the types and devices in the given
-    dict of examples.
-    '''
-    def __init__(self, data, examples):
-        self.data = data
-        self.examples = examples
-        self.cached_data = {}
-    def __getitem__(self, key):
-        if key in self.cached_data:
-            return self.cached_data[key]
-        val = self.data[key]
-        if key not in self.examples:
-            return val
-        example = self.examples.get(key, None)
-        example_type = type(example)
-        if example is not None and type(val) != example_type:
-            if isinstance(example, torch.Tensor):
-                val = torch.from_numpy(val)
-            else:
-                val = example_type(val)
-        if isinstance(example, torch.Tensor):
-            val = val.to(dtype=example.dtype, device=example.device)
-        self.cached_data[key] = val
-        return val
-    def __iter__(self):
-        return iter(self.data)
-    def __len__(self):
-        return len(self.data)
-
-class PrefixSubDict(MutableMapping):
-    '''
-    Provides a view of the subset of a dict where string keys begin with
-    the given prefix. The prefix is stripped from all keys of the view.
-    '''
-    def __init__(self, data, prefix=''):
-        self.data = data
-        self.prefix = prefix
-        self._cached_keys = None
-    def __getitem__(self, key):
-        return self.data[self.prefix + key]
-    def __setitem__(self, key, value):
-        pkey = self.prefix + key
-        if self._cached_keys is not None and pkey not in self.data:
-            self._cached_keys = None
-        self.data[pkey] = value
-    def __delitem__(self, key):
-        pkey = self.prefix + key
-        if self._cached_keys is not None and pkey in self.data:
-            self._cached_keys = None
-        del self.data[pkey]
-    def __cached_keys(self):
-        if self._cached_keys is None:
-            plen = len(self.prefix)
-            self._cached_keys = list(k[plen:] for k in self.data
-                                     if k.startswith(self.prefix))
-        return self._cached_keys
-    def __iter__(self):
-        return iter(self.__cached_keys())
-    def __len__(self):
-        return len(self.__cached_keys())
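Note (not part of the diff): a short sketch of the round trip the module above was built for, assuming it is importable as `statedict` (a placeholder path).

    import numpy as np
    import torch
    from statedict import save_to_numpy_dict, PrefixSubDict  # hypothetical path

    model = torch.nn.Linear(4, 2)
    store = {}
    save_to_numpy_dict(model, store, prefix='encoder')  # keys: 'encoder.weight', 'encoder.bias'
    np.savez('weights.npz', **store)                    # plain npz instead of a .pth pickle

    view = PrefixSubDict(store, 'encoder.')
    print(sorted(view))  # ['bias', 'weight'] -- the prefix is stripped in the view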
spaces/Djacon/emotion_detection/files/js/main.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/DragGan/DragGan-Inversion/stylegan_human/training/training_loop.py DELETED
@@ -1,499 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Main training loop."""
-
-import os
-import time
-import copy
-import json
-import pickle
-import psutil
-import PIL.Image
-import numpy as np
-import torch
-import dnnlib
-from torch_utils import misc
-from torch_utils import training_stats
-from torch_utils.ops import conv2d_gradfix
-from torch_utils.ops import grid_sample_gradfix
-
-import legacy
-from metrics import metric_main
-
-# ----------------------------------------------------------------------------
-
-
-def setup_snapshot_image_grid(training_set, random_seed=0):
-    rnd = np.random.RandomState(random_seed)
-    gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
-    gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
-
-    # No labels => show random subset of training samples.
-    if not training_set.has_labels:
-        all_indices = list(range(len(training_set)))
-        rnd.shuffle(all_indices)
-        grid_indices = [all_indices[i %
-                                    len(all_indices)] for i in range(gw * gh)]
-
-    else:
-        # Group training samples by label.
-        label_groups = dict()  # label => [idx, ...]
-        for idx in range(len(training_set)):
-            label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
-            if label not in label_groups:
-                label_groups[label] = []
-            label_groups[label].append(idx)
-
-        # Reorder.
-        label_order = sorted(label_groups.keys())
-        for label in label_order:
-            rnd.shuffle(label_groups[label])
-
-        # Organize into grid.
-        grid_indices = []
-        for y in range(gh):
-            label = label_order[y % len(label_order)]
-            indices = label_groups[label]
-            grid_indices += [indices[x % len(indices)] for x in range(gw)]
-            label_groups[label] = [
-                indices[(i + gw) % len(indices)] for i in range(len(indices))]
-
-    # Load data.
-    images, labels = zip(*[training_set[i] for i in grid_indices])
-    return (gw, gh), np.stack(images), np.stack(labels)
-
-# ----------------------------------------------------------------------------
-
-
-def save_image_grid(img, fname, drange, grid_size):
-    lo, hi = drange
-    img = np.asarray(img, dtype=np.float32)
-    img = (img - lo) * (255 / (hi - lo))
-    img = np.rint(img).clip(0, 255).astype(np.uint8)
-
-    gw, gh = grid_size
-    _N, C, H, W = img.shape
-    img = img.reshape([gh, gw, C, H, W])
-    img = img.transpose(0, 3, 1, 4, 2)
-    img = img.reshape([gh * H, gw * W, C])
-
-    assert C in [1, 3]
-    if C == 1:
-        PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)
-    if C == 3:
-        PIL.Image.fromarray(img, 'RGB').save(fname)
-
-# ----------------------------------------------------------------------------
-
-
-def training_loop(
-    run_dir='.',                    # Output directory.
-    training_set_kwargs={},         # Options for training set.
-    data_loader_kwargs={},          # Options for torch.utils.data.DataLoader.
-    G_kwargs={},                    # Options for generator network.
-    D_kwargs={},                    # Options for discriminator network.
-    G_opt_kwargs={},                # Options for generator optimizer.
-    D_opt_kwargs={},                # Options for discriminator optimizer.
-    # Options for augmentation pipeline. None = disable.
-    augment_kwargs=None,
-    loss_kwargs={},                 # Options for loss function.
-    metrics=[],                     # Metrics to evaluate during training.
-    random_seed=0,                  # Global random seed.
-    num_gpus=1,                     # Number of GPUs participating in the training.
-    rank=0,                         # Rank of the current process in [0, num_gpus[.
-    # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
-    batch_size=4,
-    batch_gpu=4,                    # Number of samples processed at a time by one GPU.
-    # Half-life of the exponential moving average (EMA) of generator weights.
-    ema_kimg=10,
-    ema_rampup=0.05,                # EMA ramp-up coefficient. None = no rampup.
-    # How often to perform regularization for G? None = disable lazy regularization.
-    G_reg_interval=None,
-    # How often to perform regularization for D? None = disable lazy regularization.
-    D_reg_interval=16,
-    augment_p=0,                    # Initial value of augmentation probability.
-    ada_target=None,                # ADA target value. None = fixed p.
-    ada_interval=4,                 # How often to perform ADA adjustment?
-    # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
-    ada_kimg=500,
-    # Total length of the training, measured in thousands of real images.
-    total_kimg=25000,
-    kimg_per_tick=4,                # Progress snapshot interval.
-    # How often to save image snapshots? None = disable.
-    image_snapshot_ticks=50,
-    # How often to save network snapshots? None = disable.
-    network_snapshot_ticks=50,
-    resume_pkl=None,                # Network pickle to resume training from.
-    resume_kimg=0,                  # First kimg to report when resuming training.
-    cudnn_benchmark=True,           # Enable torch.backends.cudnn.benchmark?
-    # Callback function for determining whether to abort training. Must return consistent results across ranks.
-    abort_fn=None,
-    # Callback function for updating training progress. Called for all ranks.
-    progress_fn=None,
-):
-    # Initialize.
-    start_time = time.time()
-    device = torch.device('cuda', rank)
-    np.random.seed(random_seed * num_gpus + rank)
-    torch.manual_seed(random_seed * num_gpus + rank)
-    # Improves training speed.
-    torch.backends.cudnn.benchmark = cudnn_benchmark
-    # Improves numerical accuracy.
-    torch.backends.cuda.matmul.allow_tf32 = False
-    # Improves numerical accuracy.
-    torch.backends.cudnn.allow_tf32 = False
-    # Improves training speed.
-    conv2d_gradfix.enabled = True
-    # Avoids errors with the augmentation pipe.
-    grid_sample_gradfix.enabled = True
-
-    # Load training set.
-    if rank == 0:
-        print('Loading training set...')
-    training_set = dnnlib.util.construct_class_by_name(
-        **training_set_kwargs)  # subclass of training.dataset.Dataset
-    training_set_sampler = misc.InfiniteSampler(
-        dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
-    training_set_iterator = iter(torch.utils.data.DataLoader(
-        dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
-    if rank == 0:
-        print()
-        print('Num images: ', len(training_set))
-        print('Image shape:', training_set.image_shape)
-        print('Label shape:', training_set.label_shape)
-        print()
-
-    # Construct networks.
-    if rank == 0:
-        print('Constructing networks...')
-    common_kwargs = dict(c_dim=training_set.label_dim,
-                         img_resolution=training_set.resolution, img_channels=training_set.num_channels)
-    G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train(
-    ).requires_grad_(False).to(device)  # subclass of torch.nn.Module
-    D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train(
-    ).requires_grad_(False).to(device)  # subclass of torch.nn.Module
-    G_ema = copy.deepcopy(G).eval()
-
-    # Resume from existing pickle.
-    if (resume_pkl is not None) and (rank == 0):
-        print(f'Resuming from "{resume_pkl}"')
-        with dnnlib.util.open_url(resume_pkl) as f:
-            resume_data = legacy.load_network_pkl(f)
-        for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
-            misc.copy_params_and_buffers(
-                resume_data[name], module, require_all=False)
-
-    # Print network summary tables.
-    if rank == 0:
-        z = torch.empty([batch_gpu, G.z_dim], device=device)
-        c = torch.empty([batch_gpu, G.c_dim], device=device)
-        img = misc.print_module_summary(G, [z, c])
-        misc.print_module_summary(D, [img, c])
-
-    # Setup augmentation.
-    if rank == 0:
-        print('Setting up augmentation...')
-    augment_pipe = None
-    ada_stats = None
-    if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
-        augment_pipe = dnnlib.util.construct_class_by_name(
-            **augment_kwargs).train().requires_grad_(False).to(device)  # subclass of torch.nn.Module
-        augment_pipe.p.copy_(torch.as_tensor(augment_p))
-        if ada_target is not None:
-            ada_stats = training_stats.Collector(regex='Loss/signs/real')
-
-    # Distribute across GPUs.
-    if rank == 0:
-        print(f'Distributing across {num_gpus} GPUs...')
-    for module in [G, D, G_ema, augment_pipe]:
-        if module is not None and num_gpus > 1:
-            for param in misc.params_and_buffers(module):
-                torch.distributed.broadcast(param, src=0)
-
-    # Setup training phases.
-    if rank == 0:
-        print('Setting up training phases...')
-    loss = dnnlib.util.construct_class_by_name(
-        device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs)  # subclass of training.loss.Loss
-    phases = []
-    for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
-        if reg_interval is None:
-            opt = dnnlib.util.construct_class_by_name(
-                params=module.parameters(), **opt_kwargs)  # subclass of torch.optim.Optimizer
-            phases += [dnnlib.EasyDict(name=name+'both',
-                                       module=module, opt=opt, interval=1)]
-        else:  # Lazy regularization.
-            mb_ratio = reg_interval / (reg_interval + 1)
-            opt_kwargs = dnnlib.EasyDict(opt_kwargs)
-            opt_kwargs.lr = opt_kwargs.lr * mb_ratio
-            opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
-            opt = dnnlib.util.construct_class_by_name(
-                module.parameters(), **opt_kwargs)  # subclass of torch.optim.Optimizer
-            phases += [dnnlib.EasyDict(name=name+'main',
-                                       module=module, opt=opt, interval=1)]
-            phases += [dnnlib.EasyDict(name=name+'reg',
-                                       module=module, opt=opt, interval=reg_interval)]
-    for phase in phases:
-        phase.start_event = None
-        phase.end_event = None
-        if rank == 0:
-            phase.start_event = torch.cuda.Event(enable_timing=True)
-            phase.end_event = torch.cuda.Event(enable_timing=True)
-
-    # Export sample images.
-    grid_size = None
-    grid_z = None
-    grid_c = None
-    if rank == 0:
-        print('Exporting sample images...')
-        grid_size, images, labels = setup_snapshot_image_grid(
-            training_set=training_set)
-        save_image_grid(images, os.path.join(run_dir, 'reals.png'),
-                        drange=[0, 255], grid_size=grid_size)
-        grid_z = torch.randn([labels.shape[0], G.z_dim],
-                             device=device).split(batch_gpu)
-        grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
-        images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu()
-                            for z, c in zip(grid_z, grid_c)]).numpy()
-        save_image_grid(images, os.path.join(
-            run_dir, 'fakes_init.png'), drange=[-1, 1], grid_size=grid_size)
-
-    # Initialize logs.
-    if rank == 0:
-        print('Initializing logs...')
-    stats_collector = training_stats.Collector(regex='.*')
-    stats_metrics = dict()
-    stats_jsonl = None
-    stats_tfevents = None
-    if rank == 0:
-        stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
-        try:
-            import torch.utils.tensorboard as tensorboard
-            stats_tfevents = tensorboard.SummaryWriter(run_dir)
-        except ImportError as err:
-            print('Skipping tfevents export:', err)
-
-    # Train.
-    if rank == 0:
-        print(f'Training for {total_kimg} kimg...')
-        print()
-    cur_nimg = resume_kimg * 1000
-    cur_tick = 0
-    tick_start_nimg = cur_nimg
-    tick_start_time = time.time()
-    maintenance_time = tick_start_time - start_time
-    batch_idx = 0
-    if progress_fn is not None:
-        progress_fn(0, total_kimg)
-    while True:
-
-        # Fetch training data.
-        with torch.autograd.profiler.record_function('data_fetch'):
-            phase_real_img, phase_real_c = next(training_set_iterator)
-            phase_real_img = (phase_real_img.to(device).to(
-                torch.float32) / 127.5 - 1).split(batch_gpu)
-            phase_real_c = phase_real_c.to(device).split(batch_gpu)
-            all_gen_z = torch.randn(
-                [len(phases) * batch_size, G.z_dim], device=device)
-            all_gen_z = [phase_gen_z.split(
-                batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
-            all_gen_c = [training_set.get_label(np.random.randint(
-                len(training_set))) for _ in range(len(phases) * batch_size)]
-            all_gen_c = torch.from_numpy(
-                np.stack(all_gen_c)).pin_memory().to(device)
-            all_gen_c = [phase_gen_c.split(
-                batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
-
-        # Execute training phases.
-        for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
-            if batch_idx % phase.interval != 0:
-                continue
-            if phase.start_event is not None:
-                phase.start_event.record(torch.cuda.current_stream(device))
-
-            # Accumulate gradients.
-            phase.opt.zero_grad(set_to_none=True)
-            phase.module.requires_grad_(True)
-            for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c):
-                loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c,
-                                          gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)
-            phase.module.requires_grad_(False)
-
-            # Update weights.
-            with torch.autograd.profiler.record_function(phase.name + '_opt'):
-                params = [param for param in phase.module.parameters()
-                          if param.grad is not None]
-                if len(params) > 0:
-                    flat = torch.cat([param.grad.flatten()
-                                      for param in params])
-                    if num_gpus > 1:
-                        torch.distributed.all_reduce(flat)
-                        flat /= num_gpus
-                    misc.nan_to_num(flat, nan=0, posinf=1e5,
-                                    neginf=-1e5, out=flat)
-                    grads = flat.split([param.numel() for param in params])
-                    for param, grad in zip(params, grads):
-                        param.grad = grad.reshape(param.shape)
-                phase.opt.step()
-
-            # Phase done.
-            if phase.end_event is not None:
-                phase.end_event.record(torch.cuda.current_stream(device))
-
-        # Update G_ema.
-        with torch.autograd.profiler.record_function('Gema'):
-            ema_nimg = ema_kimg * 1000
-            if ema_rampup is not None:
-                ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
-            ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
-            for p_ema, p in zip(G_ema.parameters(), G.parameters()):
-                p_ema.copy_(p.lerp(p_ema, ema_beta))
-            for b_ema, b in zip(G_ema.buffers(), G.buffers()):
-                b_ema.copy_(b)
-
-        # Update state.
-        cur_nimg += batch_size
-        batch_idx += 1
-
-        # Execute ADA heuristic.
-        if (ada_stats is not None) and (batch_idx % ada_interval == 0):
-            ada_stats.update()
-            adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * \
-                (batch_size * ada_interval) / (ada_kimg * 1000)
-            augment_pipe.p.copy_(
-                (augment_pipe.p + adjust).max(misc.constant(0, device=device)))
-
-        # Perform maintenance tasks once per tick.
-        done = (cur_nimg >= total_kimg * 1000)
-        if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
-            continue
-
-        # Print status line, accumulating the same information in training_stats.
-        tick_end_time = time.time()
-        fields = []
-        fields += [
-            f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
-        fields += [
-            f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
-        fields += [
-            f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
-        fields += [
-            f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
-        fields += [
-            f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
-        fields += [
-            f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
-        fields += [
-            f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
-        fields += [
-            f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
-        fields += [
-            f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
-        torch.cuda.reset_peak_memory_stats()
-        fields += [
-            f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
-        training_stats.report0('Timing/total_hours',
-                               (tick_end_time - start_time) / (60 * 60))
-        training_stats.report0('Timing/total_days',
-                               (tick_end_time - start_time) / (24 * 60 * 60))
-        if rank == 0:
-            print(' '.join(fields))
-
-        # Check for abort.
-        if (not done) and (abort_fn is not None) and abort_fn():
-            done = True
-            if rank == 0:
-                print()
-                print('Aborting...')
-
-        # Save image snapshot.
-        if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
-            images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu()
-                                for z, c in zip(grid_z, grid_c)]).numpy()
-            save_image_grid(images, os.path.join(
-                run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1, 1], grid_size=grid_size)
-
-        # Save network snapshot.
-        snapshot_pkl = None
-        snapshot_data = None
-        if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
-            snapshot_data = dict(G=G, D=D, G_ema=G_ema, augment_pipe=augment_pipe,
-                                 training_set_kwargs=dict(training_set_kwargs))
-            for key, value in snapshot_data.items():
-                if isinstance(value, torch.nn.Module):
-                    value = copy.deepcopy(value).eval().requires_grad_(False)
-                    if num_gpus > 1:
-                        misc.check_ddp_consistency(
-                            value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
-                        for param in misc.params_and_buffers(value):
-                            torch.distributed.broadcast(param, src=0)
-                    snapshot_data[key] = value.cpu()
-                del value  # conserve memory
-            snapshot_pkl = os.path.join(
-                run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
-            if rank == 0:
-                with open(snapshot_pkl, 'wb') as f:
-                    pickle.dump(snapshot_data, f)
-
-        # Evaluate metrics.
-        if (snapshot_data is not None) and (len(metrics) > 0):
-            if rank == 0:
-                print('Evaluating metrics...')
-            for metric in metrics:
-                result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
-                                                      dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
-                if rank == 0:
-                    metric_main.report_metric(
-                        result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
-                stats_metrics.update(result_dict.results)
-        del snapshot_data  # conserve memory
-
-        # Collect statistics.
-        for phase in phases:
-            value = []
-            if (phase.start_event is not None) and (phase.end_event is not None):
-                phase.end_event.synchronize()
-                value = phase.start_event.elapsed_time(phase.end_event)
-            training_stats.report0('Timing/' + phase.name, value)
-        stats_collector.update()
-        stats_dict = stats_collector.as_dict()
-
-        # Update logs.
-        timestamp = time.time()
-        if stats_jsonl is not None:
-            fields = dict(stats_dict, timestamp=timestamp)
-            stats_jsonl.write(json.dumps(fields) + '\n')
-            stats_jsonl.flush()
-        if stats_tfevents is not None:
-            global_step = int(cur_nimg / 1e3)
-            walltime = timestamp - start_time
-            for name, value in stats_dict.items():
-                stats_tfevents.add_scalar(
-                    name, value.mean, global_step=global_step, walltime=walltime)
-            for name, value in stats_metrics.items():
-                stats_tfevents.add_scalar(
-                    f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
-            stats_tfevents.flush()
-        if progress_fn is not None:
-            progress_fn(cur_nimg // 1000, total_kimg)
-
-        # Update state.
-        cur_tick += 1
-        tick_start_nimg = cur_nimg
-        tick_start_time = time.time()
-        maintenance_time = tick_start_time - tick_end_time
-        if done:
-            break
-
-    # Done.
-    if rank == 0:
-        print()
-        print('Exiting...')
-
-# ----------------------------------------------------------------------------
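Note (not part of the diff): the G_ema update above encodes its half-life in kimg; a small check of that arithmetic using only the defaults from the function signature.

    # With ema_kimg=10 and batch_size=4 (the defaults above):
    ema_nimg = 10 * 1000
    batch_size = 4
    ema_beta = 0.5 ** (batch_size / ema_nimg)      # ~0.999723 per optimizer step
    steps_per_half_life = ema_nimg / batch_size    # 2500 steps = 10k images
    assert abs(ema_beta ** steps_per_half_life - 0.5) < 1e-6
    # i.e. after 10 kimg the old EMA weights contribute exactly half, which is
    # what "half-life of the EMA of generator weights" means in the comment.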
spaces/ECCV2022/bytetrack/tutorials/transtrack/main_track.py DELETED
@@ -1,375 +0,0 @@
-# Modified by Peize Sun, Rufeng Zhang
-# ------------------------------------------------------------------------
-# Deformable DETR
-# Copyright (c) 2020 SenseTime. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-# Modified from DETR (https://github.com/facebookresearch/detr)
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# ------------------------------------------------------------------------
-import argparse
-import datetime
-import json
-import random
-import time
-from pathlib import Path
-
-import numpy as np
-import torch
-from torch.utils.data import DataLoader
-import datasets
-import util.misc as utils
-import datasets.samplers as samplers
-from datasets import build_dataset, get_coco_api_from_dataset
-from engine_track import evaluate, train_one_epoch, evaluate_track
-from models import build_tracktrain_model, build_tracktest_model, build_model
-from models import Tracker
-from models import save_track
-from mot_online.byte_tracker import BYTETracker
-
-from collections import defaultdict
-
-
-def get_args_parser():
-    parser = argparse.ArgumentParser('Deformable DETR Detector', add_help=False)
-    parser.add_argument('--lr', default=2e-4, type=float)
-    parser.add_argument('--lr_backbone_names', default=["backbone.0"], type=str, nargs='+')
-    parser.add_argument('--lr_backbone', default=2e-5, type=float)
-    parser.add_argument('--lr_linear_proj_names', default=['reference_points', 'sampling_offsets'], type=str, nargs='+')
-    parser.add_argument('--lr_linear_proj_mult', default=0.1, type=float)
-    parser.add_argument('--batch_size', default=1, type=int)
-    parser.add_argument('--weight_decay', default=1e-4, type=float)
-    parser.add_argument('--epochs', default=50, type=int)
-    parser.add_argument('--lr_drop', default=40, type=int)
-    parser.add_argument('--lr_drop_epochs', default=None, type=int, nargs='+')
-    parser.add_argument('--clip_max_norm', default=0.1, type=float,
-                        help='gradient clipping max norm')
-
-    parser.add_argument('--sgd', action='store_true')
-
-    # Variants of Deformable DETR
-    parser.add_argument('--with_box_refine', default=True, action='store_true')
-    parser.add_argument('--two_stage', default=False, action='store_true')
-
-    # Model parameters
-    parser.add_argument('--frozen_weights', type=str, default=None,
-                        help="Path to the pretrained model. If set, only the mask head will be trained")
-
-    # * Backbone
-    parser.add_argument('--backbone', default='resnet50', type=str,
-                        help="Name of the convolutional backbone to use")
-    parser.add_argument('--dilation', action='store_true',
-                        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
-    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
-                        help="Type of positional embedding to use on top of the image features")
-    parser.add_argument('--position_embedding_scale', default=2 * np.pi, type=float,
-                        help="position / size * scale")
-    parser.add_argument('--num_feature_levels', default=4, type=int, help='number of feature levels')
-
-    # * Transformer
-    parser.add_argument('--enc_layers', default=6, type=int,
-                        help="Number of encoding layers in the transformer")
-    parser.add_argument('--dec_layers', default=6, type=int,
-                        help="Number of decoding layers in the transformer")
-    parser.add_argument('--dim_feedforward', default=1024, type=int,
-                        help="Intermediate size of the feedforward layers in the transformer blocks")
-    parser.add_argument('--hidden_dim', default=256, type=int,
-                        help="Size of the embeddings (dimension of the transformer)")
-    parser.add_argument('--dropout', default=0.1, type=float,
-                        help="Dropout applied in the transformer")
-    parser.add_argument('--nheads', default=8, type=int,
-                        help="Number of attention heads inside the transformer's attentions")
-    parser.add_argument('--num_queries', default=500, type=int,
-                        help="Number of query slots")
-    parser.add_argument('--dec_n_points', default=4, type=int)
-    parser.add_argument('--enc_n_points', default=4, type=int)
-
-    # * Segmentation
-    parser.add_argument('--masks', action='store_true',
-                        help="Train segmentation head if the flag is provided")
-
-    # Loss
-    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
-                        help="Disables auxiliary decoding losses (loss at each layer)")
-
-    # * Matcher
-    parser.add_argument('--set_cost_class', default=2, type=float,
-                        help="Class coefficient in the matching cost")
-    parser.add_argument('--set_cost_bbox', default=5, type=float,
-                        help="L1 box coefficient in the matching cost")
-    parser.add_argument('--set_cost_giou', default=2, type=float,
-                        help="giou box coefficient in the matching cost")
-
-    # * Loss coefficients
-    parser.add_argument('--mask_loss_coef', default=1, type=float)
-    parser.add_argument('--dice_loss_coef', default=1, type=float)
-    parser.add_argument('--cls_loss_coef', default=2, type=float)
-    parser.add_argument('--bbox_loss_coef', default=5, type=float)
-    parser.add_argument('--giou_loss_coef', default=2, type=float)
-    parser.add_argument('--focal_alpha', default=0.25, type=float)
-    parser.add_argument('--id_loss_coef', default=1, type=float)
-
-    # dataset parameters
-    parser.add_argument('--dataset_file', default='coco')
-    parser.add_argument('--coco_path', default='./data/coco', type=str)
-    parser.add_argument('--coco_panoptic_path', type=str)
-    parser.add_argument('--remove_difficult', action='store_true')
-
-    parser.add_argument('--output_dir', default='',
-                        help='path where to save, empty for no saving')
-    parser.add_argument('--device', default='cuda',
-                        help='device to use for training / testing')
-    parser.add_argument('--seed', default=42, type=int)
-    parser.add_argument('--resume', default='', help='resume from checkpoint')
-    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
-                        help='start epoch')
-    parser.add_argument('--eval', action='store_true')
-    parser.add_argument('--num_workers', default=2, type=int)
-    parser.add_argument('--cache_mode', default=False, action='store_true', help='whether to cache images on memory')
-
-    # PyTorch checkpointing for saving memory (torch.utils.checkpoint.checkpoint)
-    parser.add_argument('--checkpoint_enc_ffn', default=False, action='store_true')
-    parser.add_argument('--checkpoint_dec_ffn', default=False, action='store_true')
-
-    # appended for track.
-    parser.add_argument('--track_train_split', default='train', type=str)
-    parser.add_argument('--track_eval_split', default='val', type=str)
-    parser.add_argument('--track_thresh', default=0.4, type=float)
-    parser.add_argument('--reid_shared', default=False, type=bool)
-    parser.add_argument('--reid_dim', default=128, type=int)
-    parser.add_argument('--num_ids', default=360, type=int)
-
-    # detector for track.
-    parser.add_argument('--det_val', default=False, action='store_true')
-
-    return parser
-
-
-def main(args):
-    utils.init_distributed_mode(args)
-    print("git:\n  {}\n".format(utils.get_sha()))
-
-    if args.frozen_weights is not None:
-        assert args.masks, "Frozen training is meant for segmentation only"
-    print(args)
-
-    device = torch.device(args.device)
-
-    # fix the seed for reproducibility
-    seed = args.seed + utils.get_rank()
-    torch.manual_seed(seed)
-    np.random.seed(seed)
-    random.seed(seed)
-
-    if args.det_val:
-        assert args.eval, 'only support eval mode of detector for track'
-        model, criterion, postprocessors = build_model(args)
-    elif args.eval:
-        model, criterion, postprocessors = build_tracktest_model(args)
-    else:
-        model, criterion, postprocessors = build_tracktrain_model(args)
-
-    model.to(device)
-
-    model_without_ddp = model
-    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
-    print('number of params:', n_parameters)
-
-    dataset_train = build_dataset(image_set=args.track_train_split, args=args)
-    dataset_val = build_dataset(image_set=args.track_eval_split, args=args)
-
-    if args.distributed:
-        if args.cache_mode:
-            sampler_train = samplers.NodeDistributedSampler(dataset_train)
-            sampler_val = samplers.NodeDistributedSampler(dataset_val, shuffle=False)
-        else:
-            sampler_train = samplers.DistributedSampler(dataset_train)
-            sampler_val = samplers.DistributedSampler(dataset_val, shuffle=False)
-    else:
-        sampler_train = torch.utils.data.RandomSampler(dataset_train)
-        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
-
-    batch_sampler_train = torch.utils.data.BatchSampler(
-        sampler_train, args.batch_size, drop_last=True)
-
-    data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
-                                   collate_fn=utils.collate_fn, num_workers=args.num_workers,
-                                   pin_memory=True)
-    data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
-                                 drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers,
-                                 pin_memory=True)
-
-    # lr_backbone_names = ["backbone.0", "backbone.neck", "input_proj", "transformer.encoder"]
-    def match_name_keywords(n, name_keywords):
-        out = False
-        for b in name_keywords:
-            if b in n:
-                out = True
-                break
-        return out
-
-    for n, p in model_without_ddp.named_parameters():
-        print(n)
-
-    param_dicts = [
-        {
-            "params":
-                [p for n, p in model_without_ddp.named_parameters()
-                 if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
-            "lr": args.lr,
-        },
-        {
-            "params": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],
-            "lr": args.lr_backbone,
-        },
-        {
-            "params": [p for n, p in model_without_ddp.named_parameters() if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],
-            "lr": args.lr * args.lr_linear_proj_mult,
-        }
-    ]
-    if args.sgd:
-        optimizer = torch.optim.SGD(param_dicts, lr=args.lr, momentum=0.9,
-                                    weight_decay=args.weight_decay)
-    else:
-        optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
-                                      weight_decay=args.weight_decay)
-    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
-
-    if args.distributed:
-        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
-        model_without_ddp = model.module
-
-    if args.dataset_file == "coco_panoptic":
-        # We also evaluate AP during panoptic training, on original coco DS
-        coco_val = datasets.coco.build("val", args)
-        base_ds = get_coco_api_from_dataset(coco_val)
-    else:
-        base_ds = get_coco_api_from_dataset(dataset_val)
-
-    if args.frozen_weights is not None:
-        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
-        model_without_ddp.detr.load_state_dict(checkpoint['model'])
-
-    output_dir = Path(args.output_dir)
-    if args.resume:
-        if args.resume.startswith('https'):
-            checkpoint = torch.hub.load_state_dict_from_url(
-                args.resume, map_location='cpu', check_hash=True)
-        else:
-            checkpoint = torch.load(args.resume, map_location='cpu')
-        missing_keys, unexpected_keys = model_without_ddp.load_state_dict(checkpoint['model'], strict=False)
-        unexpected_keys = [k for k in unexpected_keys if not (k.endswith('total_params') or k.endswith('total_ops'))]
-        if len(missing_keys) > 0:
-            print('Missing Keys: {}'.format(missing_keys))
-        if len(unexpected_keys) > 0:
-            print('Unexpected Keys: {}'.format(unexpected_keys))
-        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
-            import copy
-            p_groups = copy.deepcopy(optimizer.param_groups)
-            optimizer.load_state_dict(checkpoint['optimizer'])
-            for pg, pg_old in zip(optimizer.param_groups, p_groups):
-                pg['lr'] = pg_old['lr']
-                pg['initial_lr'] = pg_old['initial_lr']
-            print(optimizer.param_groups)
-            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
-            # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance).
-            args.override_resumed_lr_drop = True
-            if args.override_resumed_lr_drop:
-                print('Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.')
-                lr_scheduler.step_size = args.lr_drop
-                lr_scheduler.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
-            lr_scheduler.step(lr_scheduler.last_epoch)
-            args.start_epoch = checkpoint['epoch'] + 1
-        # check the resumed model
-        # if not args.eval:
-        #     test_stats, coco_evaluator, _ = evaluate(
-        #         model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
-        #     )
-
-    if args.eval:
-        assert args.batch_size == 1, "Now only support batch_size 1."
-        # tracker = MOTXTracker(score_thresh=args.track_thresh)
-        # test_stats, coco_evaluator, res_tracks = evaluate(model, criterion, postprocessors, data_loader_val,
-        #                                                   base_ds, device, args.output_dir, tracker=tracker,
-        #                                                   phase='eval', det_val=args.det_val)
-        tracker = BYTETracker(args)
-        test_stats, coco_evaluator, res_tracks = evaluate_track(args, model, criterion, postprocessors, data_loader_val,
-                                                                base_ds, device, args.output_dir, tracker=tracker,
-                                                                phase='eval', det_val=args.det_val)
-        if args.output_dir:
-            utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
-        if res_tracks is not None:
-            print("Creating video index for {}.".format(args.dataset_file))
-            video_to_images = defaultdict(list)
-            video_names = defaultdict()
-            for _, info in dataset_val.coco.imgs.items():
-                video_to_images[info["video_id"]].append({"image_id": info["id"],
-                                                          "frame_id": info["frame_id"]})
-                video_name = info["file_name"].split("/")[0]
-                if video_name not in video_names:
-                    video_names[info["video_id"]] = video_name
-            assert len(video_to_images) == len(video_names)
-            # save mot results.
-            save_track(res_tracks, args.output_dir, video_to_images, video_names, args.track_eval_split)
-
-        return
-
-    print("Start training")
-    start_time = time.time()
-    for epoch in range(args.start_epoch, args.epochs):
-        if args.distributed:
-            sampler_train.set_epoch(epoch)
-        train_stats = train_one_epoch(
-            model, criterion, data_loader_train, optimizer, device, epoch, args.clip_max_norm)
-        lr_scheduler.step()
-        if args.output_dir:
-            checkpoint_paths = [output_dir / 'checkpoint.pth']
-            # extra checkpoint before LR drop and every 5 epochs
-            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 5 == 0:
-                checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
-            for checkpoint_path in checkpoint_paths:
-                utils.save_on_master({
-                    'model': model_without_ddp.state_dict(),
-                    'optimizer': optimizer.state_dict(),
-                    'lr_scheduler': lr_scheduler.state_dict(),
-                    'epoch': epoch,
-                    'args': args,
-                }, checkpoint_path)
-        if epoch % 10 == 0 or epoch > args.epochs - 5:
-            test_stats, coco_evaluator, _ = evaluate(
-                model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir,
-            )
-
-            log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
-                         **{f'test_{k}': v for k, v in test_stats.items()},
-                         'epoch': epoch,
-                         'n_parameters': n_parameters}
-
-            if args.output_dir and utils.is_main_process():
-                with (output_dir / "log.txt").open("a") as f:
-                    f.write(json.dumps(log_stats) + "\n")
-
-                # for evaluation logs
-                if coco_evaluator is not None:
-                    (output_dir / 'eval').mkdir(exist_ok=True)
-                    if "bbox" in coco_evaluator.coco_eval:
-                        filenames = ['latest.pth']
-                        if epoch % 50 == 0:
-                            filenames.append(f'{epoch:03}.pth')
-                        for name in filenames:
-                            torch.save(coco_evaluator.coco_eval["bbox"].eval,
-                                       output_dir / "eval" / name)
-
-    total_time = time.time() - start_time
-    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
-    print('Training time {}'.format(total_time_str))
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser('Deformable DETR training and evaluation script', parents=[get_args_parser()])
-    args = parser.parse_args()
-    if args.output_dir:
-        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
-    main(args)
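Note (not part of the diff): `main` above builds three optimizer parameter groups by substring-matching parameter names. A minimal reproduction with a toy module; the module names are illustrative stand-ins, only the keywords and learning rates come from the defaults above.

    import torch

    def match_name_keywords(n, keywords):
        return any(k in n for k in keywords)

    model = torch.nn.ModuleDict({
        'backbone': torch.nn.ModuleList([torch.nn.Linear(4, 4)]),  # yields names like 'backbone.0.weight'
        'sampling_offsets': torch.nn.Linear(4, 4),                 # matched by the linear-proj keywords
        'head': torch.nn.Linear(4, 4),                             # falls into the default group
    })
    lr, lr_backbone, proj_mult = 2e-4, 2e-5, 0.1
    backbone_kw = ['backbone.0']
    proj_kw = ['reference_points', 'sampling_offsets']

    param_dicts = [
        {'params': [p for n, p in model.named_parameters()
                    if not match_name_keywords(n, backbone_kw)
                    and not match_name_keywords(n, proj_kw)], 'lr': lr},
        {'params': [p for n, p in model.named_parameters()
                    if match_name_keywords(n, backbone_kw)], 'lr': lr_backbone},
        {'params': [p for n, p in model.named_parameters()
                    if match_name_keywords(n, proj_kw)], 'lr': lr * proj_mult},
    ]
    optimizer = torch.optim.AdamW(param_dicts, lr=lr, weight_decay=1e-4)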
spaces/EDGAhab/Paimon-Talking/utils.py DELETED
@@ -1,258 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
-    logger.info("Saving model and optimizer state at iteration {} to {}".format(
-        iteration, checkpoint_path))
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    torch.save({'model': state_dict,
-                'iteration': iteration,
-                'optimizer': optimizer.state_dict(),
-                'learning_rate': learning_rate}, checkpoint_path)
-
-
-def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050):
-    for k, v in scalars.items():
-        writer.add_scalar(k, v, global_step)
-    for k, v in histograms.items():
-        writer.add_histogram(k, v, global_step)
-    for k, v in images.items():
-        writer.add_image(k, v, global_step, dataformats='HWC')
-    for k, v in audios.items():
-        writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
-def latest_checkpoint_path(dir_path, regex="G_*.pth"):
-    f_list = glob.glob(os.path.join(dir_path, regex))
-    f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
-    x = f_list[-1]
-    print(x)
-    return x
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10, 2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_wav_to_torch(full_path):
-    sampling_rate, data = read(full_path)
-    return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
-                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warning("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()
spaces/EuroPython2022/clickbaitonator/fudge/data.py DELETED
@@ -1,415 +0,0 @@
- import random
- import math
- import os
- import pickle
- from collections import defaultdict, namedtuple
- import string
-
- os.environ['TOKENIZERS_PARALLELISM'] = 'false' # turn off since we're using multiple threads for loading anyway
-
- from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline, set_seed, GPT2Tokenizer, GPT2Model
- import numpy as np
- from tqdm import tqdm
- import torch
-
- from fudge.util import suppress_stdout
- from fudge.poetry_util import is_iambic, count_syllables, get_rhymes, get_rhyme_group
- from fudge.constants import *
-
- DatasetInfo = namedtuple('DatasetInfo',
-                          ['index2word', 'word2index', 'total_words', 'vocab', 'glove_embeddings'])
- RhymeInfo = namedtuple('RhymeInfo',
-                        ['word2rhyme_group', 'rhyme_group_counts', 'rhyme_groups', 'index2rhyme_group', 'rhyme_group2index', 'total_rhyme_groups'])
-
- def collate(batch):
-     pad_id = batch[0][4]
-     inputs = [b[0] for b in batch]
-     lengths = torch.LongTensor([b[1] for b in batch])
-     max_length = lengths.max()
-     for i in range(len(inputs)):
-         if len(inputs[i]) < max_length:
-             inputs[i] = torch.cat([inputs[i], torch.zeros(max_length - len(inputs[i])).long()], dim=0) # actually 0 is fine as pad since it's masked out
-     inputs = torch.stack(inputs, dim=0)
-     future_words = torch.LongTensor([b[2] for b in batch]).unsqueeze(0).expand(len(batch), -1).clone() # batch x N=batch
-     labels = torch.zeros_like(future_words).long()
-     labels = labels.scatter(1, torch.arange(len(batch)).unsqueeze(1), torch.ones(len(batch)).long().unsqueeze(1)).clone()
-     log_probs = torch.Tensor([b[3] for b in batch])
-     classification_labels = [b[5] for b in batch] # batch
-     if type(classification_labels[0]) == list:
-         for i in range(len(classification_labels)):
-             assert len(classification_labels[i]) == lengths[i]
-             if len(classification_labels[i]) < max_length:
-                 classification_labels[i] = torch.cat([torch.LongTensor(classification_labels[i]), -1 + torch.zeros(max_length - len(classification_labels[i])).long()], dim=0)
-             else:
-                 classification_labels[i] = torch.LongTensor(classification_labels[i])
-         classification_labels = torch.stack(classification_labels, dim=0) # batch x seq
-     else:
-         assert type(classification_labels[0]) == int
-         classification_labels = torch.LongTensor(classification_labels) # they're just int labels
-     syllables_to_go = torch.LongTensor([b[6] for b in batch])
-     future_word_num_syllables = torch.LongTensor([b[7] for b in batch])
-     rhyme_group_index = torch.LongTensor([b[8] for b in batch])
-     return (inputs, lengths, future_words, log_probs, labels, classification_labels, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-
-
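The labels tensor built in collate is, in effect, an n x n identity matrix: example i's gold future word occupies slot i of the batch-shared future_words list, and the scatter puts a 1 there. A small sketch of that equivalence (shapes only; n is an assumed batch size):

    import torch
    n = 4  # assumed batch size
    # same scatter as in collate: put a 1 at column i of row i
    labels = torch.zeros(n, n).long().scatter(1, torch.arange(n).unsqueeze(1),
                                              torch.ones(n).long().unsqueeze(1))
    assert torch.equal(labels, torch.eye(n).long())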
- def load_rhyme_info(index2word, vocab):
-     word2rhyme_group = defaultdict(lambda: UNKNOWN_RHYME_GROUP)
-     rhyme_group_counts = defaultdict(lambda: 0)
-     rhyme_groups = set()
-     for word in index2word:
-         try:
-             rhyme_group = get_rhyme_group(word)
-             word2rhyme_group[word] = rhyme_group
-             rhyme_group_counts[rhyme_group] += (vocab[word] if word in vocab else 1) # for rare words not in vocab, just use 1
-             rhyme_groups.add(rhyme_group)
-         except:
-             rhyme_group_counts[UNKNOWN_RHYME_GROUP] += (vocab[word] if word in vocab else 1)
-     index2rhyme_group = [UNKNOWN_RHYME_GROUP] + sorted(list(rhyme_groups))
-     rhyme_group2index = {s: i for i, s in enumerate(index2rhyme_group)}
-     total_rhyme_groups = sum(rhyme_group_counts.values())
-
-     return RhymeInfo(word2rhyme_group=dict(word2rhyme_group),
-                      rhyme_group_counts=dict(rhyme_group_counts),
-                      rhyme_groups=rhyme_groups,
-                      index2rhyme_group=index2rhyme_group,
-                      rhyme_group2index=rhyme_group2index,
-                      total_rhyme_groups=total_rhyme_groups)
-
-
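Note that index2rhyme_group reserves index 0 for UNKNOWN_RHYME_GROUP, so out-of-dictionary words still map to a valid index. A hypothetical lookup (the words and counts are invented; since word2rhyme_group is returned as a plain dict, a .get default replaces the defaultdict behavior):

    info = load_rhyme_info(['cat', 'hat'], {'cat': 3, 'hat': 2})
    group = info.word2rhyme_group.get('cat', UNKNOWN_RHYME_GROUP)
    idx = info.rhyme_group2index[group]  # 0 iff the word's rhyme group is unknown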
- class Dataset:
-     def __init__(self, args):
-         print('loading data')
-         random.seed(args.seed)
-         self.batch_size = args.batch_size
-         self.data_dir = args.data_dir
-         self.topic = args.task == 'topic'
-         self.formality = args.task == 'formality'
-         self.iambic = args.task == 'iambic'
-         self.rhyme = args.task == 'rhyme'
-         self.newline = args.task == 'newline'
-
-         self.tokenizer = AutoTokenizer.from_pretrained(FORMALITY_MODEL_STRING if self.formality else TOPIC_MODEL_STRING)
-         self.tokenizer.add_special_tokens({'pad_token': PAD_TOKEN})
-         self.gpt_pad_id = self.tokenizer.encode(PAD_TOKEN)[0] # actually just the vocab size
-         sentences = []
-         self.vocab = defaultdict(lambda: 0)
-         if self.formality:
-             self.vocab['placeholder'] = 1 # anything so we don't crash
-             train, val, test = [], [], []
-             for category, label in [('formal', 1), ('informal', 0)]:
-                 with open(os.path.join(args.data_dir, 'train', category), 'r') as rf:
-                     for i, line in enumerate(rf):
-                         if len(line) > FORMALITY_MAX_LEN:
-                             line = ' '.join(line.strip()[:FORMALITY_MAX_LEN].split()[:-1]) # cutoff words until below max len; chosen so only ~20 examples affected in dataset
-                         if i < FORMALITY_VAL_SIZE // 2:
-                             val.append((line.strip(), label))
-                         else:
-                             train.append((line.strip(), label))
-                 with open(os.path.join(args.data_dir, 'test', category), 'r') as rf:
-                     for line in rf:
-                         if len(line) > FORMALITY_MAX_LEN:
-                             line = ' '.join(line.strip()[:FORMALITY_MAX_LEN].split()[:-1]) # cutoff words until below max len
-                         test.append((line.strip(), label))
-             self.splits = {}
-             self.splits['train'], self.splits['val'], self.splits['test'] = train, val, test
-         else: # topic / poetry
-             for root, _, filenames in os.walk(args.data_dir):
-                 for fname in filenames:
-                     with open(os.path.join(root, fname), 'r') as rf:
-                         for line in rf:
-                             sentences.append(line.strip())
-                             for word in line.strip().split(' '):
-                                 self.vocab[word] += 1
-             random.shuffle(sentences)
-             self.splits = {}
-             if args.debug:
-                 self.splits['val'] = sentences
-                 self.splits['test'] = sentences
-                 self.splits['train'] = sentences
-             else:
-                 self.splits['val'] = sentences[:TOPIC_VAL_SIZE]
-                 self.splits['test'] = sentences[TOPIC_VAL_SIZE:2*TOPIC_VAL_SIZE]
-                 self.splits['train'] = sentences[2*TOPIC_VAL_SIZE:]
-
-         if args.dataset_info is not None:
-             print('loading dataset info from file')
-             with open(args.dataset_info, 'rb') as rf:
-                 dataset_info = pickle.load(rf)
-             self.vocab, self.total_words, self.index2word, self.word2index, self.glove_embeddings = \
-                 dataset_info.vocab, dataset_info.total_words, dataset_info.index2word, dataset_info.word2index, dataset_info.glove_embeddings
-             self.dataset_info = dataset_info
-         else:
-             print('generating dataset info from scratch')
-             words_values = list(self.vocab.items())
-             words_values = sorted(words_values, key=lambda x: x[1], reverse=True)
-             if args.glove_file is None:
-                 print('no glove embeddings given')
-                 for word, _ in words_values[VOCAB_SIZE:]: # only use somewhat common tokens
-                     del self.vocab[word]
-                 glove_embeddings = None
-             else:
-                 print('loading glove embeddings')
-                 glove_embeddings = {}
-                 with open(args.glove_file, 'r') as rf:
-                     for i, line in enumerate(rf):
-                         if i % GLOVE_PRINT_PROGRESS_FREQ == 0:
-                             print(i)
-                         line = line.strip().split()
-                         if len(line) != GLOVE_DIM + 1:
-                             continue # skip multi-word embeddings which are rare anyway
-                         glove_embeddings[line[0]] = [float(x) for x in line[1:]]
-                 for word, _ in words_values:
-                     if word not in glove_embeddings:
-                         del self.vocab[word]
-             self.total_words = sum(self.vocab.values())
-             self.index2word = [PAD_TOKEN] + sorted(list(self.vocab.keys()))
-             self.word2index = {s: i for i, s in enumerate(self.index2word)}
-             self.vocab = dict(self.vocab) # so we can pickle later
-             if glove_embeddings is None:
-                 self.glove_embeddings = None
-             else:
-                 self.glove_embeddings = torch.stack([torch.zeros(GLOVE_DIM)] + [torch.Tensor(glove_embeddings[word]) for word in self.index2word[1:]], dim=0)
-
-             self.dataset_info = DatasetInfo(index2word=self.index2word,
-                                             word2index=self.word2index,
-                                             total_words=self.total_words,
-                                             vocab=self.vocab,
-                                             glove_embeddings=self.glove_embeddings)
-
-         if self.rhyme:
-             if args.rhyme_info is not None:
-                 print('loading rhyme info from file')
-                 with open(args.rhyme_info, 'rb') as rf:
-                     self.rhyme_info = pickle.load(rf)
-             else:
-                 self.rhyme_info = load_rhyme_info(self.index2word, self.vocab)
-             self.word2rhyme_group, self.rhyme_group_counts, self.rhyme_groups, self.index2rhyme_group, self.rhyme_group2index, self.total_rhyme_groups = \
-                 defaultdict(lambda: UNKNOWN_RHYME_GROUP, self.rhyme_info.word2rhyme_group), self.rhyme_info.rhyme_group_counts, self.rhyme_info.rhyme_groups, self.rhyme_info.index2rhyme_group, self.rhyme_info.rhyme_group2index, self.rhyme_info.total_rhyme_groups
-
-         print('done loading data')
-         print('split sizes:')
-         for key in ['train', 'val', 'test']:
-             print(key, len(self.splits[key]))
-         if not self.formality:
-             print('total words', self.total_words)
-             print('vocab size', len(self.index2word))
-
-     def shuffle(self, split, seed=None):
-         assert split in ['train', 'val', 'test']
-         if seed is not None:
-             random.seed(seed)
-         random.shuffle(self.splits[split])
-
-     def loader(self, split, num_workers=20, indices=None):
-         assert split in ['train', 'val', 'test']
-         data = self.splits[split] if indices is None else [self.splits[split][i] for i in indices]
-         return torch.utils.data.DataLoader(SplitLoader(data, self), batch_size=self.batch_size, pin_memory=True, collate_fn=collate, num_workers=num_workers)
-
-
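A hypothetical driver for this class (the Namespace fields mirror exactly what __init__ reads; the task, path, and sizes are illustrative, not defaults from this repo):

    from argparse import Namespace
    args = Namespace(task='topic', data_dir='data/topic', batch_size=32, seed=0,
                     debug=False, dataset_info=None, glove_file=None, rhyme_info=None)
    d = Dataset(args)
    d.shuffle('train', seed=0)
    batch = next(iter(d.loader('train', num_workers=0)))
    inputs, lengths, future_words, log_probs, labels, *rest = batch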
- class SplitLoader(torch.utils.data.IterableDataset):
-     def __init__(self, data, parent):
-         super().__init__()
-         self.data = data
-         self.pos = 0
-         self.parent = parent
-
-     def __len__(self):
-         return len(self.data)
-
-     def __iter__(self):
-         return self
-
-     def __next__(self):
-         increment = 1
-         worker_info = torch.utils.data.get_worker_info()
-         if worker_info is not None: # in a worker process
-             increment = worker_info.num_workers
-             worker_id = worker_info.id
-             if self.pos == 0:
-                 self.pos = worker_id
-         valid = False
-         while not valid:
-             if self.pos >= len(self):
-                 raise StopIteration
-             if self.parent.topic:
-                 failed = False
-                 future_word_num_syllables, rhyme_group_index, syllables_to_go = -1, -1, -1
-                 raw_sentence, classification_label = self.data[self.pos], -1
-                 original_sentence = raw_sentence.split()
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = random.randint(1, length - 1) # for lm, learn all positions at once
-                     inp = sentence[:pos_to_split]
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     if not failed and num_words_in_input < len(original_sentence):
-                         future_word_position_max = len(original_sentence) - 1
-                         future_word_position = random.randint(num_words_in_input-1, future_word_position_max) # allow the last possibly partial word though
-                         future_word = original_sentence[future_word_position]
-                         unstripped_future_word = future_word
-                         future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-                         if not failed and future_word in self.parent.word2index.keys():
-                             word_log_prob = math.log(self.parent.vocab[future_word] / self.parent.total_words) # roughly baseline prob of word under noise model
-                             future_word = self.parent.word2index[future_word]
-                             pad_id = self.parent.gpt_pad_id
-                             example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                             valid = not failed
-             elif self.parent.formality:
-                 future_word_num_syllables, rhyme_group_index, syllables_to_go = -1, -1, -1
-                 raw_sentence, classification_label = self.data[self.pos]
-                 original_sentence = raw_sentence.split()
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = length # no need to split; we're going to train on all possible prefixes simultaneously for efficiency
-                     inp = sentence[:pos_to_split]
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
-                     future_word_position_max = len(original_sentence) - 1
-                     future_word_position = 0
-                     future_word = 'placeholder'
-                     unstripped_future_word = future_word
-                     future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-                     word_log_prob, future_word = 0, 0
-                     pad_id = self.parent.gpt_pad_id
-                     example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                     valid = True
-             elif self.parent.iambic:
-                 failed = False
-                 future_word_num_syllables, rhyme_group_index, syllables_to_go = -1, -1, -1
-                 raw_sentence, classification_label = self.data[self.pos], -1
-                 original_sentence = raw_sentence.split()
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = random.randint(0, length - 1)
-                     # try to get a subseq of exactly 10 syllables
-                     inp = sentence[pos_to_split:]
-                     num_syllables = 0
-                     checked = False
-                     for i in range(1, len(inp)):
-                         decoded = self.parent.tokenizer.decode(inp[:i])
-                         num_syllables = count_syllables(decoded)
-                         if num_syllables > POETRY_LINE_SYLLABLES:
-                             inp = inp[:i-1] # might get a few data points where the split is in the middle of a word, but it should be ok for learning.
-                             last_line_length = i-1
-                             decoded = self.parent.tokenizer.decode(inp)
-                             num_syllables = count_syllables(decoded)
-                             checked = True
-                             break
-                     if not checked or num_syllables != POETRY_LINE_SYLLABLES:
-                         failed = True
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     classification_label = [is_iambic(self.parent.tokenizer.decode(inp)) for _ in range(length)] # predict for whole seq including future
-                     # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
-                     future_word_position_max = len(original_sentence) - 1
-                     future_word_position = 0
-                     future_word = 'placeholder'
-                     unstripped_future_word = future_word
-                     future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-                     if not failed:
-                         word_log_prob, future_word = 0, 0
-                         pad_id = self.parent.gpt_pad_id
-                         example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                         valid = not failed
-             elif self.parent.rhyme:
-                 failed = False
-                 future_word_num_syllables, rhyme_group_index = -1, -1
-                 raw_sentence, classification_label = self.data[self.pos], -1
-                 original_sentence = raw_sentence.split()
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = random.randint(1, length - 1) # for lm, learn all positions at once
-                     inp = sentence[:pos_to_split]
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     if not failed and num_words_in_input < len(original_sentence):
-                         # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
-                         future_word_position_max = min(len(original_sentence) - 1, num_words_in_input + MAX_COUNT_SYLLABLE_DIST)
-                         future_word_position = random.randint(num_words_in_input-1, future_word_position_max) # allow the last possibly partial word though
-                         future_word = original_sentence[future_word_position]
-                         unstripped_future_word = future_word
-                         future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-
-                         words_in_between = original_sentence[num_words_in_input-1:future_word_position+1]
-                         syllables_to_go = count_syllables(' '.join(words_in_between))
-                         if syllables_to_go > MAX_COUNT_SYLLABLE_DIST:
-                             failed = True
-                         future_word_num_syllables = count_syllables(future_word)
-                         rhyme_group = self.parent.word2rhyme_group[future_word]
-                         rhyme_group_index = self.parent.rhyme_group2index[rhyme_group]
-                         # truncate context a bit since we're just doing couplets. random length from 1 to max desired length for this purpose.
-                         desired_length = random.randint(1, MAX_COUNT_SYLLABLE_INPUT_LENGTH)
-                         inp = inp[-desired_length:]
-                         length = len(inp)
-
-                         if not failed and future_word in self.parent.word2index.keys():
-                             word_log_prob = math.log(self.parent.rhyme_group_counts[rhyme_group] / self.parent.total_rhyme_groups)
-                             future_word = rhyme_group_index # future conditioning is just the rhyme group in this case
-                             pad_id = self.parent.gpt_pad_id
-                             example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                             valid = not failed
-             elif self.parent.newline:
-                 failed = False
-                 future_word_num_syllables, rhyme_group_index = -1, -1
-                 raw_sentence, classification_label = self.data[self.pos], -1
-                 original_sentence = raw_sentence.split()
-                 sentence = self.parent.tokenizer.encode(raw_sentence, return_tensors='pt')[0]
-                 length = len(sentence)
-                 min_sentence_length = MIN_SENTENCE_LENGTH
-                 if len(sentence) > min_sentence_length: # set to 3. well, everything in data is > 3 for the bag of words task
-                     pos_to_split = random.randint(1, length - 1) # for lm, learn all positions at once
-                     inp = sentence[:pos_to_split]
-                     while pos_to_split < len(sentence):
-                         if len(self.parent.tokenizer.decode(inp).split()) == len(self.parent.tokenizer.decode(sentence[:pos_to_split + 1]).split()):
-                             pos_to_split += 1
-                             inp = sentence[:pos_to_split]
-                         else:
-                             break
-                     length = len(inp)
-                     num_words_in_input = len(self.parent.tokenizer.decode(inp).split())
-                     if not failed and num_words_in_input < len(original_sentence):
-                         # only look up to 10 words ahead if we're doing count syllables, since we'll filter out anything more than 10 syllables ahead anyway
-                         future_word_position_max = len(original_sentence) - 1
-                         future_word_position = random.randint(num_words_in_input-1, future_word_position_max) # allow the last possibly partial word though
-                         future_word = original_sentence[future_word_position]
-                         unstripped_future_word = future_word
-                         future_word = future_word.strip().strip(string.punctuation) # NOTE: we didn't strip punctuation for the topic bag of words paper experiments for our method. it doesn't make much difference, though.
-
-                         # future_word = original_sentence[-1] # useful for debugging
-                         words_in_between = original_sentence[num_words_in_input-1:future_word_position+1]
-                         syllables_to_go = count_syllables(' '.join(words_in_between))
-                         if syllables_to_go > MAX_COUNT_SYLLABLE_DIST:
-                             failed = True
-                         # truncate context a bit since we're just doing couplets. random length from 1 to max desired length for this purpose.
-                         desired_length = random.randint(1, MAX_COUNT_SYLLABLE_INPUT_LENGTH)
-                         # desired_length = 10 # useful for debugging
-                         inp = inp[-desired_length:]
-                         length = len(inp)
-                         true_label = 1 if unstripped_future_word.strip()[-1] in PHRASE_ENDS else 0 # common ways to end a phrase
-                         classification_label = [-1 for _ in range(length)]
-                         classification_label[-1] = true_label # only learn at the last position
-                         if not failed and future_word in self.parent.word2index.keys():
-                             word_log_prob = math.log(self.parent.vocab[future_word] / self.parent.total_words) # roughly baseline prob of word under noise model
-                             future_word = self.parent.word2index[future_word]
-                             pad_id = self.parent.gpt_pad_id
-                             example = (inp, length, future_word, word_log_prob, pad_id, classification_label, syllables_to_go, future_word_num_syllables, rhyme_group_index)
-                             valid = not failed
-             else:
-                 raise NotImplementedError
-
-             self.pos += increment
-         return example
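The worker handling at the top of __next__ shards the data round-robin: worker w starts at index w and strides by num_workers, so no two DataLoader workers emit the same example. The same pattern in a stripped-down, self-contained sketch (independent of FUDGE; written in generator style rather than __next__):

    import torch

    class StridedIterable(torch.utils.data.IterableDataset):
        """Worker w yields indices w, w+W, w+2W, ... for W total workers."""
        def __init__(self, data):
            super().__init__()
            self.data = data

        def __iter__(self):
            info = torch.utils.data.get_worker_info()
            start = info.id if info is not None else 0
            step = info.num_workers if info is not None else 1
            for i in range(start, len(self.data), step):
                yield self.data[i]

    loader = torch.utils.data.DataLoader(StridedIterable(list(range(10))),
                                         batch_size=2, num_workers=2)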