parquet-converter committed on
Commit 1d3664a · 1 Parent(s): 68cebee

Update parquet files (step 71 of 249)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Beirut Nightmares Ghada Samman Pdf To Jpg.md +0 -14
  2. spaces/1gistliPinn/ChatGPT4/Examples/CA ERwin Data Modeler Serial Key.md +0 -8
  3. spaces/1gistliPinn/ChatGPT4/Examples/CADlink EngraveLab Expert 7.1 Rev.1 Build 8.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Cutewap.com Bollywood New Movie Download Menu Stream or Download Your Favorite Hindi Movies Anytime Anywhere.md +0 -6
  5. spaces/1line/AutoGPT/tests/__init__.py +0 -0
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Create Amazing Artworks with AI Art Generator MOD APK (Premium Unlocked) Download.md +0 -113
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Chess APK Unlocked for Android - Enjoy Offline and Multiplayer Modes.md +0 -13
  8. spaces/1phancelerku/anime-remove-background/Dream League Soccer 2023 Hack for iOS Mod APK with Weak Enemies and More.md +0 -106
  9. spaces/1phancelerku/anime-remove-background/Drive Modern Buses in Realistic Cities with Bus Simulator 2023 - Download Now.md +0 -119
  10. spaces/52Hz/CMFNet_dehazing/model/block.py +0 -146
  11. spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_123812KB.py +0 -122
  12. spaces/801artistry/RVC801/julius/lowpass.py +0 -181
  13. spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/losses/stft_loss.py +0 -153
  14. spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/ddpm_audio.py +0 -1262
  15. spaces/AgentVerse/agentVerse/agentverse/logging.py +0 -291
  16. spaces/AhmedBadrDev/stomach/README.md +0 -12
  17. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GetCode.py +0 -232
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py +0 -112
  19. spaces/Andy1621/uniformer_image_detection/configs/_base_/models/fast_rcnn_r50_fpn.py +0 -62
  20. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py +0 -2
  21. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py +0 -9
  22. spaces/Anni123/AuRoRA/retrieval_utils.py +0 -248
  23. spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/fft_pytorch.py +0 -73
  24. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roi_align.py +0 -223
  25. spaces/Ariharasudhan/YoloV5/utils/loggers/comet/hpo.py +0 -118
  26. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/zoom_in_app.py +0 -186
  27. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/exceptions.py +0 -267
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_deprecation_warning.py +0 -7
  29. spaces/Audiogen/vector-search-demo/README.md +0 -13
  30. spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+server.ts +0 -236
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/helpers.py +0 -1088
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/webencodings/mklabels.py +0 -59
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py +0 -525
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/util.py +0 -235
  35. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/packaging/README.md +0 -17
  36. spaces/CVPR/GFPGAN-example/inference_gfpgan.py +0 -116
  37. spaces/CVPR/LIVE/thrust/thrust/detail/copy_if.h +0 -75
  38. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sequence.h +0 -44
  39. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/temporary_buffer.h +0 -22
  40. spaces/CVPR/LIVE/thrust/thrust/system_error.h +0 -51
  41. spaces/CVPR/SPOTER_Sign_Language_Recognition/app.py +0 -181
  42. spaces/CVPR/lama-example/fetch_data/places_challenge_train_download.sh +0 -14
  43. spaces/CVPR/regionclip-demo/detectron2/utils/env.py +0 -170
  44. spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py +0 -14
  45. spaces/ChallengeHub/Chinese-LangChain/clc/config.py +0 -18
  46. spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/times.py +0 -10
  47. spaces/ChandraMohanNayal/AutoGPT/autogpt/processing/html.py +0 -33
  48. spaces/Chintan-Donda/KKMS-KSSW-HF/src/data_loader.py +0 -230
  49. spaces/CikeyQI/meme-api/meme_generator/dirs.py +0 -225
  50. spaces/Clara998/DisneyPixarMovie/README.md +0 -12
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Beirut Nightmares Ghada Samman Pdf To Jpg.md DELETED
@@ -1,14 +0,0 @@
- <br />
- <h1>Beirut Nightmares: A Novel by Ghada Samman</h1>
- <p>Beirut Nightmares is a novel by Syrian writer Ghada Samman, who lived in Beirut during the Lebanese Civil War. The novel was first published in Arabic in 1976 and later translated into English by Nancy Roberts in 1997. It is considered one of the most important works of Arabic literature that deals with the war and its effects on the people of Beirut.</p>
- <p>The novel consists of 151 episodes that are labeled as "Nightmare 1" and so on. The episodes are not chronological, but rather follow the stream of consciousness of the narrator, a woman who is trapped in her apartment for two weeks by street battles and sniper fire. The narrator writes a series of vignettes that depict the horrors of war, as well as her own memories, dreams, fantasies, and fears. She also interacts with her neighbors, who include an old man and his son, and their male servant. The narrator's stories are sometimes realistic, sometimes surreal, sometimes humorous, and sometimes tragic. They reflect the diverse and complex realities of Beirut during the war, as well as the psychological and emotional impact of violence and isolation on the narrator and her fellow citizens.</p>
- <h2>Beirut Nightmares Ghada Samman Pdf To Jpg</h2><br /><p><b><b>DOWNLOAD</b> &#9745; <a href="https://byltly.com/2uKvw7">https://byltly.com/2uKvw7</a></b></p><br /><br />
- <p>Beirut Nightmares is a novel that challenges the conventional boundaries between reality and fiction, between waking and sleeping, between sanity and madness. It is a novel that explores the themes of identity, survival, resistance, and hope in the face of war and destruction. It is a novel that gives voice to the experiences of women in war-torn Beirut, who are often marginalized or silenced by patriarchal and political forces. It is a novel that offers a vivid and powerful portrait of a city and a people in crisis.</p>
- <p>If you are interested in reading Beirut Nightmares by Ghada Samman, you can find it in PDF format here[^1^]. If you prefer to read it as a JPG image, you can convert it online using this tool[^2^].</p>
-
- <p>Beirut Nightmares is not only a novel, but also a testimony of the history and culture of Beirut. Ghada Samman draws on her own experiences as a journalist, a feminist, and a witness of the war to create a rich and authentic representation of the city and its people. She also incorporates elements of Arabic folklore, mythology, and literature to enrich her narrative and to challenge the stereotypes and prejudices that often surround the Arab world. Beirut Nightmares is a novel that celebrates the diversity, creativity, and resilience of Beirut and its inhabitants, who refuse to succumb to despair and violence.</p>
- <p>Beirut Nightmares is also a novel that invites the reader to question their own assumptions and perspectives on war and its consequences. By blurring the lines between reality and fiction, Ghada Samman challenges the reader to reconsider their notions of truth, justice, and morality. By shifting between different points of view, she challenges the reader to empathize with different characters and situations. By using humor, irony, and satire, she challenges the reader to critique the absurdity and hypocrisy of war and its perpetrators. Beirut Nightmares is a novel that provokes the reader to think critically and creatively about the complex and multifaceted issues of war and peace.</p>
- <p>Beirut Nightmares is a novel that deserves to be read by anyone who is interested in learning more about the Lebanese Civil War and its impact on the people of Beirut. It is also a novel that deserves to be read by anyone who appreciates innovative and engaging literature that explores the human condition in times of crisis. Beirut Nightmares is a novel that will make you laugh, cry, wonder, and reflect. It is a novel that will stay with you long after you finish reading it.</p>
- <p></p> 7b8c122e87<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/CA ERwin Data Modeler Serial Key.md DELETED
@@ -1,8 +0,0 @@
- <br />
- <p>ca erwin integrates the product information, the material information, and the production system all into one erp system, and provides a unified database of erp systems. in addition, ca erwin has strong oem development ability, ca erwin is the most complete erp solution for oem to develop, it can be used in the fields of mobile phone, computer, pc, tablet, digital camera, consumer electronics, lighting, lighting equipment, etc. the technical support team of ca erwin is always ready to provide technical support for oem developers. ca erwin is the best erp solution for oem, ca erwin is the best erp solution for oem. </p>
- <p>ca erwin is a complete erp solution and powerful enterprise accounting solution. ca erwin is a complete erp solution and powerful enterprise accounting solution, and it is the first erp solution which developed by ca. erp means enterprise resource planning, it integrates various business information and processes into one integrated and coordinated system. it includes finance, manufacturing, human resources, sales, purchasing, production, inventory, etc. ca erwin is the best erp solution for oem. ca erwin is the best erp solution for oem, ca erwin is the best erp solution for oem. </p>
- <h2>CA ERwin data modeler Serial Key</h2><br /><p><b><b>DOWNLOAD</b> &#9881; <a href="https://imgfil.com/2uy0lc">https://imgfil.com/2uy0lc</a></b></p><br /><br />
- <p>if you want to integrate erp, we recommend to use ca erwin, not only use ca erwin, ca erwin can save you a lot of money and development time. ca erwin is the best erp solution for oem, ca erwin is the best erp solution for oem.</p>
- <p>ca erwin data modeler serial key is data-base software that helps you to create a new database with tables, fields, primary keys and other features. ca erwin data modeler serial key full version free for all users.</p> 899543212b<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/CADlink EngraveLab Expert 7.1 Rev.1 Build 8.md DELETED
@@ -1,6 +0,0 @@
- <h2>CADlink EngraveLab Expert 7.1 rev.1 Build 8</h2><br /><p><b><b>Download</b> &rarr;&rarr;&rarr; <a href="https://imgfil.com/2uxWUM">https://imgfil.com/2uxWUM</a></b></p><br /><br />
- <br />
- 54 (max)(11.2.6.1) [0:0:0:0] [fmt_msb 0:0:0:0] [fmt_lsw 0:0:0:0] [fmt_lsb 0:0:0:0] [fmt_msb_swap 0:0:0:0] [fmt_lsb_swap 0:0:0:0] [hb_min_sync_s 1] [hb_max_sync_rate 30] [hb_min_sync_width 1] [hb_max_sync_width 30] [hb_expand_codes_only 0] [hb_min_size 1] [hb_max_size 1] [hb_grid_size 1] [hb_grid_size_x 1] [hb_grid_size_y 1] [hb_grid_hor_expand 1] [hb_grid_hor_fill 1] [hb_grid_ver_expand 1] [hb_grid_ver_fill 1] [hb_num_shifts 1] [hb_first_shifts_only 1] [hb_shifts_x_c 0] [hb_shifts_y_c 0] [hb_fct_tune_max_size_shifts 1] [hb_fct_tune_num_shifts 1] [hb_fct_tune_size_bit_offsets 1] [hb_fct_tune_size_stages 1] [hb_fct_tune_size_lens 1] [hb_fct_tune_size_codes 1] [hb_fct_tune_size_mantissas 1] [hb_fct_tune_size_specials 1] [hb_fct_tune_size_templates 1] [hb_fct_tune_use_small_value_shift 1] [hb_fct_tune_zero_area_size_shift 1] [hb_fct_tune_width_scale 1] [hb_fct_tune_zero_area_offsets 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Cutewap.com Bollywood New Movie Download Menu Stream or Download Your Favorite Hindi Movies Anytime Anywhere.md DELETED
@@ -1,6 +0,0 @@
- <h2>Skylife Sample Robot 2.25 crack</h2><br /><p><b><b>Download</b> &#9999; <a href="https://imgfil.com/2uxXh4">https://imgfil.com/2uxXh4</a></b></p><br /><br />
-
- aaccfb2cb3<br />
- <br />
- <br />
- <p></p>
spaces/1line/AutoGPT/tests/__init__.py DELETED
File without changes
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Create Amazing Artworks with AI Art Generator MOD APK (Premium Unlocked) Download.md DELETED
@@ -1,113 +0,0 @@
- <br />
- <h1>Download AI Art Generator Mod APK Premium Unlocked</h1>
- <p>Do you want to create amazing art with the help of artificial intelligence? Do you want to unleash your creativity and express yourself in different styles? Do you want to enjoy all the features of a powerful app without paying anything? If you answered yes to any of these questions, then you should download AI Art Generator mod apk premium unlocked. In this article, we will tell you what is AI Art Generator, why you should download it, and how to do it. We will also show you some examples of the stunning art you can make with this app.</p>
- <h2>download ai art generator mod apk premium unlocked</h2><br /><p><b><b>DOWNLOAD</b> &#9881;&#9881;&#9881; <a href="https://urlin.us/2uT1Wv">https://urlin.us/2uT1Wv</a></b></p><br /><br />
- <h2>What is AI Art Generator?</h2>
- <p>AI Art Generator is an app that lets you create amazing art with the help of artificial intelligence. You can choose from different types of art, such as anime, digital paintings, and photorealistic art. You can also customize your art by adjusting the parameters, such as style, color, and resolution. You can save your art to your device or share it with your friends on social media.</p>
- <h3>Features of AI Art Generator</h3>
- <p>AI Art Generator has many features that make it a great app for art lovers. Some of these features are:</p>
- <ul>
- <li>It uses Stable Diffusion, a state-of-the-art AI technology that can generate high-quality images in seconds.</li>
- <li>It has a simple and intuitive interface that makes it easy to use.</li>
- <li>It has a large library of styles and genres that you can choose from.</li>
- <li>It allows you to edit your art by changing the brightness, contrast, saturation, and other settings.</li>
- <li>It supports different resolutions and formats, such as JPG, PNG, and GIF.</li>
- </ul>
- <h4>How to use AI Art Generator</h4>
- <p>Using AI Art Generator is very simple. Here are the steps you need to follow:</p>
- <ol>
- <li>Open the app and select the type of art you want to make.</li>
- <li>Choose a style from the available options or upload your own image as a reference.</li>
- <li>Adjust the parameters as you like and click the Create button.</li>
- <li>Wait for a few seconds while the app generates your art.</li>
- <li>Save or share your art as you wish.</li>
- </ol>
- <h2>Why download AI Art Generator mod apk premium unlocked?</h2>
- <p>If you are wondering why you should download AI Art Generator mod apk premium unlocked instead of the original version, here are some reasons:</p>
- <p>How to get ai art generator mod apk with premium features<br />
- Best sites to download ai art generator mod apk for free<br />
- Ai art generator mod apk latest version download link<br />
- Create amazing artworks with ai art generator mod apk<br />
- Ai art generator mod apk review and tutorial<br />
- Download MonAI - ai art generator mod apk (premium unlocked) [^1^]<br />
- Ai art generator mod apk no watermark download<br />
- Ai art generator mod apk pro free download<br />
- Download ai art generator mod apk and unlock all filters<br />
- Ai art generator mod apk unlimited access download<br />
- Ai art generator mod apk cracked version download<br />
- Download ai art generator mod apk for android devices<br />
- Ai art generator mod apk installation guide and tips<br />
- Ai art generator mod apk vs original app comparison<br />
- Download ai art generator mod apk and enjoy ad-free experience<br />
- Ai art generator mod apk full version download<br />
- Download ai art generator mod apk and create stunning ai art<br />
- Ai art generator mod apk download for pc and mac<br />
- Ai art generator mod apk benefits and features<br />
- Download ai art generator mod apk and share your artworks online<br />
- Ai art generator mod apk hack download<br />
- Download ai art generator mod apk and explore different styles of ai art<br />
- Ai art generator mod apk safe and secure download<br />
- Ai art generator mod apk alternatives and similar apps<br />
- Download ai art generator mod apk and transform your photos into ai art<br />
- Ai art generator mod apk premium account download<br />
- Download ai art generator mod apk and customize your artworks<br />
- Ai art generator mod apk troubleshooting and support<br />
- Ai art generator mod apk feedback and ratings<br />
- Download ai art generator mod apk and join the community of ai artists</p>
- <h3>Benefits of mod apk premium unlocked</h3>
- <p>The mod apk premium unlocked version of AI Art Generator has some benefits that the original version does not have. Some of these benefits are:</p>
- <ul>
- <li>You can access all the features and styles without paying anything.</li>
- <li>You can remove the watermark and ads from your art.</li>
- <li>You can enjoy faster and smoother performance.</li>
- <li>You can get unlimited updates and support.</li>
- </ul>
- <h4>How to download and install mod apk premium unlocked</h4>
- <p>To download and install AI Art Generator mod apk premium unlocked, you need to follow these steps:</p>
- <ol>
- <li>Click on this link to download the mod apk file.</li>
- <li>Allow unknown sources on your device settings if prompted.</li>
- <li>Locate and install the mod apk file on your device.</li>
- <li>Open the app and enjoy creating amazing art with AI.</li>
- </ol>
- <h2>Examples of AI art generated by the app</h2>
- <p>To give you an idea of what kind of art you can create with AI Art Generator, here are some examples:</p>
- <h3>Anime art</h3>
- <p>If you are a fan of anime, you can create your own characters or scenes with AI Art Generator. You can choose from different anime styles, such as shonen, shojo, or seinen. You can also mix and match different elements, such as hair, eyes, clothes, and accessories. Here is an example of an anime character generated by the app:</p>
- <img src="" alt="Anime character generated by AI Art Generator">
- <p>Isn't she cute? You can create your own anime art with AI Art Generator mod apk premium unlocked.</p>
- <h3>Digital paintings</h3>
- <p>If you prefer a more realistic style, you can create digital paintings with AI Art Generator. You can choose from different genres, such as landscapes, portraits, or abstract. You can also use your own photos as references or inspiration. Here is an example of a digital painting generated by the app:</p>
- <img src="" alt="Digital painting generated by AI Art Generator">
- <p>Wow, that looks like a real painting! You can create your own digital paintings with AI Art Generator mod apk premium unlocked.</p>
- <h3>Photorealistic art</h3>
- <p>If you want to create art that looks like a photograph, you can use the photorealistic mode of AI Art Generator. You can select from different categories, such as animals, flowers, or food. You can also adjust the level of detail and realism. Here is an example of a photorealistic art generated by the app:</p>
- <img src="" alt="Photorealistic art generated by AI Art Generator">
- <p>That looks delicious! You can create your own photorealistic art with AI Art Generator mod apk premium unlocked.</p>
- <h2>Conclusion</h2>
- <p>AI Art Generator is an amazing app that lets you create stunning art with the help of artificial intelligence. You can choose from different types of art, such as anime, digital paintings, and photorealistic art. You can also customize your art by adjusting the parameters, such as style, color, and resolution. You can save your art to your device or share it with your friends on social media.</p>
- <p>If you want to enjoy all the features and benefits of this app without paying anything, you should download AI Art Generator mod apk premium unlocked. This version will give you access to all the styles and genres, remove the watermark and ads, improve the performance and speed, and provide unlimited updates and support.</p>
- <p>To download AI Art Generator mod apk premium unlocked, you just need to follow these simple steps:</p>
- <ol>
- <li>Click on this link to download the mod apk file.</li>
- <li>Allow unknown sources on your device settings if prompted.</li>
- <li>Locate and install the mod apk file on your device.</li>
- <li>Open the app and enjoy creating amazing art with AI.</li>
- </ol>
- <p>So what are you waiting for? Download AI Art Generator mod apk premium unlocked today and unleash your creativity!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about AI Art Generator mod apk premium unlocked:</p>
- <ul>
- <li><b>Is AI Art Generator mod apk premium unlocked safe to use?</b></li>
- <p>Yes, it is safe to use. The mod apk file has been scanned and tested by our team and it does not contain any viruses or malware. However, you should always download it from a trusted source like ours.</p>
- <li><b>Is AI Art Generator mod apk premium unlocked legal to use?</b></li>
- <p>Yes, it is legal to use. The mod apk file is not a hack or a cheat. It is just a modified version of the original app that gives you some extra features and benefits. However, you should use it at your own risk and discretion.</p>
- <li><b>Does AI Art Generator mod apk premium unlocked require root access?</b></li>
- <p>No, it does not require root access. You can install and use it on any Android device without rooting it.</p>
- <li><b>How often does AI Art Generator mod apk premium unlocked get updated?</b></li>
- <p>We update AI Art Generator mod apk premium unlocked regularly to keep up with the latest features and improvements of the original app. You can check our website for the latest version or enable automatic updates on your device settings.</p>
- <li><b>Can I request a new style or genre for AI Art Generator mod apk premium unlocked?</b></li>
- <p>Yes, you can request a new style or genre for AI Art Generator mod apk premium unlocked. You can contact us through our email or social media accounts and let us know what kind of art you want to see in the app. We will try our best to fulfill your request.</p>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Chess APK Unlocked for Android - Enjoy Offline and Multiplayer Modes.md DELETED
@@ -1,13 +0,0 @@
-
- <h1>Chess APK Unlocked: How to Play Chess Online with Friends and Improve Your Skills</h1>
- <h2>Introduction</h2>
- Chess is one of the oldest and most popular board games in the world. It is a game of logic, strategy, and skill that can challenge your mind and entertain you for hours. But what if you want to play chess online with your friends or other players from around the world? And what if you want to improve your chess skills and learn from the best? That's where chess apk unlocked comes in. Chess apk unlocked is a term that refers to a modified version of a chess app that allows you to access all the features and functions without paying any fees or subscriptions. With chess apk unlocked, you can play unlimited games online or offline, join tournaments, watch videos, solve puzzles, customize your board, chat with other players, and much more. Playing chess has many benefits for your brain and mental health. It can help you develop your memory, concentration, creativity, problem-solving, planning, self-awareness, and emotional intelligence. It can also reduce stress, anxiety, depression, and the risk of dementia. Playing chess is not only fun but also good for you. <h2>Chess APK Unlocked: What Is It and How to Get It</h2>
- An apk file is a file format that is used to install applications on Android devices. It is similar to an exe file for Windows or a dmg file for Mac. You can download apk files from various sources on the internet, such as websites, forums, or file-sharing platforms. However, you need to be careful and only download apk files from trusted and reputable sources, as some apk files may contain malware or viruses that can harm your device or steal your data. An unlocked chess apk file is a modified version of a chess app that has been hacked or cracked to remove any restrictions or limitations that the original app may have. For example, some chess apps may require you to pay a fee or subscribe to access certain features or functions, such as online play, premium content, advanced settings, etc. An unlocked chess apk file bypasses these requirements and lets you enjoy all the features and functions for free. There are many advantages of using an unlocked chess apk file over a regular chess app. Some of the advantages are: - You can play unlimited games online or offline without any ads or interruptions. - You can join tournaments and compete with other players from around the world. - You can watch videos and learn from grandmasters and experts. - You can solve puzzles and improve your tactics and strategy. - You can customize your board and pieces according to your preference. - You can chat with your opponents and send emojis and stickers. - You can analyze your games and track your progress and rating. - You can save your games and share them with others. Some examples of chess apk unlocked files are: - Chess.com Mod APK: This is a modified version of the Chess.com app, which is one of the most popular chess apps in the world. It has over 50 million users and offers a variety of features and functions, such as online play, puzzles, lessons, videos, articles, etc. The mod apk file unlocks all the premium features and functions for free, such as unlimited puzzles, unlimited lessons, unlimited videos, unlimited articles, etc. It also removes all the ads and pop-ups that may annoy you while playing. - Lichess Mod APK: This is a modified version of the Lichess app, which is another popular chess app that is free and open source. It has over 10 million users and offers a variety of features and functions, such as online play, tournaments, puzzles, analysis, etc. The mod apk file unlocks all the features and functions for free, such as unlimited puzzles, unlimited analysis, unlimited tournaments, etc. It also removes all the ads and pop-ups that may annoy you while playing. - Chess Tactics Pro Mod APK: This is a modified version of the Chess Tactics Pro app, which is a chess app that focuses on improving your tactical skills. It has over 1 million users and offers a variety of features and functions, such as puzzles, ratings, themes, etc. The mod apk file unlocks all the features and functions for free, such as unlimited puzzles, unlimited themes, unlimited ratings, etc. It also removes all the ads and pop-ups that may annoy you while playing. To get an unlocked chess apk file, you need to follow these steps: - Find a reliable and reputable source that offers the unlocked chess apk file that you want to download. You can use Google or any other search engine to find such sources. - Download the unlocked chess apk file to your device. Make sure that you have enough storage space on your device and that you have a stable internet connection. - Enable the installation of unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store. - Locate the downloaded unlocked chess apk file on your device using a file manager app or any other app that can access your files. - Tap on the unlocked chess apk file and follow the instructions to install it on your device. - Enjoy playing chess online with friends and improving your skills with an unlocked chess apk file. <h2>Chess APK Unlocked: How to Play Chess Online with Friends</h2>
- Playing chess online with friends is one of the best ways to have fun and socialize while improving your chess skills. With an unlocked chess apk file, you can play chess online with friends anytime and anywhere without any limitations or restrictions. Here is how you can do it: - location, your age, your gender, your language, etc. You can also create your own community and invite your friends to join it. - Invite your friends and challenge them to a game. To play chess online with friends, you need to invite them to a game and challenge them to a match. You can do this by using the app's chat function or by sending them a link to the game. You can also search for your friends by using their username or email address. Once you have invited your friends, you can choose the game settings, such as the time control, the board color, the rating range, etc. You can also choose to play a casual game or a rated game. - Chat with your opponents and send emojis. Playing chess online with friends is not only about moving pieces on the board, but also about having fun and socializing with them. You can chat with your opponents during the game and send them messages, emojis, stickers, gifs, etc. You can also use voice chat or video chat to communicate with them. You can also mute or block any players that you don't want to talk to or play with. <h2>Chess APK Unlocked: How to Improve Your Chess Skills</h2>
- Playing chess online with friends is not only fun but also educational. You can improve your chess skills and learn from your mistakes and successes. With an unlocked chess apk file, you can access different modes and levels of difficulty, learn from tutorials, videos, and puzzles, and analyze your games and track your progress. Here is how you can do it: - Access different modes and levels of difficulty. To improve your chess skills, you need to challenge yourself and play against opponents that are stronger than you or have different styles of play. With an unlocked chess apk file, you can access different modes and levels of difficulty that suit your needs and goals. For example, you can play against the computer or an AI opponent that has different personalities and skill levels. You can also play against other players from around the world that have different ratings and rankings. You can also play different variants of chess, such as blitz, bullet, rapid, classical, etc. - Learn from tutorials, videos, and puzzles. To improve your chess skills, you need to learn from the best and practice your tactics and strategy. With an unlocked chess apk file, you can learn from tutorials, videos, and puzzles that are designed by grandmasters and experts. You can watch videos that explain the rules, principles, concepts, openings, middlegames, endgames, etc. of chess. You can also solve puzzles that test your calculation, visualization, intuition, creativity, etc. You can also access lessons that teach you how to improve your skills in specific areas of chess. - you can analyze your games and track your progress. You can use the app's analysis function to review your moves and see where you made mistakes or missed opportunities. You can also see the evaluation, the best moves, the variations, the comments, etc. of each position. You can also use the app's statistics function to see your rating, your performance, your accuracy, your win/loss ratio, etc. You can also compare your results with other players and see how you rank among them. <h2>Chess APK Unlocked: Tips and Tricks</h2>
- Playing chess online with friends is not only fun and educational but also customizable and flexible. You can adjust the app's settings and features according to your preference and convenience. With an unlocked chess apk file, you can customize your board and pieces, use hints and undo moves, save your games and share them with others. Here are some tips and tricks that you can use: - Customize your board and pieces. To make your chess experience more enjoyable and personal, you can customize your board and pieces according to your preference. You can choose from different themes, colors, styles, sounds, etc. of the board and pieces. You can also change the size, orientation, and layout of the board and pieces. You can also enable or disable the coordinates, the notation, the arrows, etc. of the board and pieces. - Use hints and undo moves. To make your chess experience more easy and comfortable, you can use hints and undo moves when you are playing against the computer or an AI opponent. You can use hints to get suggestions for the best moves or to check if your move is correct or not. You can also undo moves if you make a mistake or change your mind. However, you should use these features sparingly and only for learning purposes, as they may affect your rating and performance. - Save your games and share them with others. To make your chess experience more memorable and social, you can save your games and share them with others. You can save your games in different formats, such as PGN, FEN, PNG, etc. You can also export or import your games to or from other apps or devices. You can also share your games with others by sending them a link or a file via email, social media, messaging apps, etc. <h2>Conclusion</h2>
- Chess is a wonderful game that can challenge your mind and entertain you for hours. Playing chess online with friends is a great way to have fun and socialize while improving your chess skills. With chess apk unlocked, you can play chess online with friends without any limitations or restrictions. You can access all the features and functions of the app for free, such as online play, tournaments, videos, puzzles, customization, chat, analysis, etc. - and puzzles. You can analyze your games and track your progress. You can customize your board and pieces. You can use hints and undo moves. You can save your games and share them with others. Chess apk unlocked is a great way to enjoy chess online with friends and improve your skills. It is easy to get and use, and it offers a lot of features and functions that you can't find in regular chess apps. If you love chess and want to have more fun and learning, you should try chess apk unlocked today. For more information and resources on chess apk unlocked, you can visit this link: [Chess APK Unlocked: The Ultimate Guide]. <h2>FAQs</h2>
- Here are some of the frequently asked questions about chess apk unlocked: - Q: What are some of the best chess apk unlocked files? - A: Some of the best chess apk unlocked files are Chess.com Mod APK, Lichess Mod APK, Chess Tactics Pro Mod APK, Chess Openings Trainer Mod APK, CT-ART Mod APK, Play Magnus Mod APK, Chess24 Mod APK, Chess Free Mod APK, Chess by AI Factory Limited Mod APK, Chesskid Mod APK, Chess Clock Mod APK, Dr. Wolf Mod APK, Chess Adventure for Kids by ChessKid Mod APK, Chessplode Mod APK, Really Bad Chess Mod APK, Shredder Chess Mod APK, Stockfish Engines OEX Mod APK, Mate in 1 Mod APK, Learn Chess with Dr. Wolf Mod APK, Magnus Trainer Mod APK. - Q: Is chess apk unlocked safe and legal? - A: Chess apk unlocked is safe and legal as long as you download it from a reliable and reputable source and install it on your device. However, you should be careful and only download apk files from trusted sources, as some apk files may contain malware or viruses that can harm your device or steal your data. You should also scan the apk file with an antivirus or anti-malware software before installing it on your device. You should also check the permissions and reviews of the apk file before installing it on your device. - Q: Can I play chess apk unlocked offline? - A: Yes, you can play chess apk unlocked offline without an internet connection. However, some features and functions may not be available or may not work properly when you are offline. For example, you may not be able to play online games, join tournaments, watch videos, access puzzles, chat with other players, etc. when you are offline. You may also not be able to update your rating or progress when you are offline. You may also encounter some errors or bugs when you are offline. Therefore, it is recommended that you play chess apk unlocked online whenever possible to enjoy all the features and functions of the app. - Q: How can I update my chess apk unlocked file? - A: To update your chess apk unlocked file, you need to download the latest version of the unlocked chess apk file from the same source that you downloaded it from before and install it on your device. You may need to uninstall the previous version of the unlocked chess apk file before installing the new one. You may also need to enable the installation of unknown sources on your device again before installing the new one. You may also need to backup your data and settings before installing the new one. - Q: What if I have a problem with my chess apk unlocked file? - A: If you have a problem with your chess apk unlocked file, such as an error message, a crash, a freeze, a glitch, etc., you can try some of these solutions: - Restart your device and try again. - Clear the cache and data of the app and try again. - Uninstall and reinstall the app and try again. - Check your internet connection and try again. - Contact the developer or the source of the app for support.</p>
- <h2>chess apk unlocked</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash; <a href="https://urlin.us/2uSUch">https://urlin.us/2uSUch</a></b></p><br /><br /> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Dream League Soccer 2023 Hack for iOS Mod APK with Weak Enemies and More.md DELETED
@@ -1,106 +0,0 @@
-
- <h1>Dream League Soccer 2023 Mod APK Hack Download iOS</h1>
- <p>If you are a fan of soccer games, you might have heard of Dream League Soccer 2023, one of the most popular and realistic soccer games on mobile devices. But did you know that you can enjoy the game even more with a mod APK hack that gives you access to unlimited resources and features? In this article, we will tell you everything you need to know about Dream League Soccer 2023 mod APK hack, including its features, how to download and install it on your iOS device, and some frequently asked questions. Let's get started!</p>
- <h2>Introduction</h2>
- <p>Soccer is one of the most popular sports in the world, and millions of people love to play it on their mobile devices. There are many soccer games available on the app store, but not all of them can offer the same level of realism, graphics, and gameplay as Dream League Soccer 2023. This game is developed by First Touch Games, a renowned studio that specializes in soccer games. Dream League Soccer 2023 is the latest installment in the series, and it comes with many new features and improvements that make it stand out from the rest.</p>
- <h2>dream league soccer 2023 mod apk hack download ios</h2><br /><p><b><b>DOWNLOAD</b> &#187;&#187;&#187; <a href="https://jinyurl.com/2uNJcG">https://jinyurl.com/2uNJcG</a></b></p><br /><br />
- <h3>What is Dream League Soccer 2023?</h3>
- <p>Dream League Soccer 2023 is a soccer simulation game that lets you build your dream team from over 4,000 FIFPRO™ licensed players and take to the field against the world’s best soccer clubs. You can also create your own stadium, customize your kits and logos, and compete in various online and offline modes. The game has stunning graphics, realistic animations, and immersive sound effects that make you feel like you are in the middle of the action. You can also enjoy the game with friends by joining or creating a club and playing online matches with other players around the world.</p>
- <h3>Why do you need a mod APK hack for Dream League Soccer 2023?</h3>
- <p>As much as Dream League Soccer 2023 is fun and addictive, it also has some limitations that can affect your gaming experience. For example, you need to earn coins and gems to unlock new players, stadiums, kits, and other items. You also need to manage your stamina and avoid fouls that can cost you matches. These things can be frustrating and time-consuming, especially if you want to progress faster and enjoy the game without any restrictions. That's why you need a mod APK hack for Dream League Soccer 2023 that can give you unlimited resources and features that can enhance your gameplay and make you unstoppable.</p>
- <h2>Features of Dream League Soccer 2023 Mod APK Hack</h2>
- <p>A mod APK hack is a modified version of the original game that has been tweaked to give you access to features that are not available in the official version. For Dream League Soccer 2023, there are many mod APK hacks available on the internet, but not all of them are safe and reliable. Some of them may contain viruses or malware that can harm your device or steal your personal information. Some of them may also not work properly or cause errors or crashes in the game. That's why we recommend you to use the mod APK hack that we have tested and verified for you. This mod APK hack has the following features:</p>
- <h3>No Foul</h3>
- <p>One of the most annoying things in soccer games is when you get fouled by your opponent or commit a foul yourself. This can result in penalties, free kicks, yellow cards, or red cards that can ruin your chances of winning. With this mod APK hack, you don't have to worry about fouls anymore, as this feature will disable them completely. You can play as aggressively as you want, without any consequences. You can also tackle your opponents without any fear of getting booked or sent off. This feature will give you an edge over your rivals and make the game more fun and exciting.</p>
- <h3>Unlimited Stamina</h3>
- <p>Another thing that can affect your performance in soccer games is your stamina. Stamina is the energy that your players have to run, dribble, pass, shoot, and defend. As you play, your stamina will decrease, and your players will become slower, weaker, and less responsive. This can make you vulnerable to your opponents and reduce your chances of scoring or winning. With this mod APK hack, you can have unlimited stamina for your players, meaning they will never get tired or exhausted. You can run as fast and as long as you want, without any loss of speed or strength. You can also perform better skills and moves, and dominate the game from start to finish.</p>
- <h3>Everything Unlocked</h3>
- <p>One of the most appealing features of Dream League Soccer 2023 is the ability to customize your team and stadium with various items and options. You can choose from over 4,000 FIFPRO™ licensed players to build your dream team, and you can also create your own stadium, kits, logos, and more. However, to unlock these items and options, you need to earn coins and gems by playing matches, completing objectives, or watching ads. This can be tedious and time-consuming, especially if you want to unlock everything quickly and easily. With this mod APK hack, you can have everything unlocked from the start, meaning you can access all the players, stadiums, kits, logos, and more without spending any coins or gems. You can also switch between different items and options as you wish, and create your ultimate team and stadium.</p>
- <h3>More Features</h3>
- <p>Besides the features mentioned above, this mod APK hack also has some other features that can make your gameplay more enjoyable and convenient. Some of these features are:</p>
- <ul>
- <li>No Ads: You can play the game without any annoying ads that can interrupt your gameplay or waste your time.</li>
- <li>No Root: You don't need to root your device to use this mod APK hack, meaning you don't have to risk damaging your device or voiding its warranty.</li>
- <li>No Ban: You don't have to worry about getting banned by the game developers or the app store for using this mod APK hack, as it has anti-ban protection that will keep you safe and secure.</li>
- <li>Easy to Use: You don't need any technical skills or knowledge to use this mod APK hack, as it has a simple and user-friendly interface that will guide you through the process.</li>
- </ul>
- <h2>How to download and install Dream League Soccer 2023 Mod APK Hack on iOS devices</h2>
- <p>If you are interested in using this mod APK hack for Dream League Soccer 2023 on your iOS device, you need to follow these steps:</p>
- <p>dream league soccer 2023 mod apk ios download free<br />
- dream league soccer 2023 hack ios no jailbreak<br />
- dream league soccer 2023 mod menu apk download for ios<br />
- dream league soccer 2023 unlimited coins and gems mod apk ios<br />
- dream league soccer 2023 mod apk offline download ios<br />
- dream league soccer 2023 hack download ios without human verification<br />
- dream league soccer 2023 mega mod apk download ios<br />
- dream league soccer 2023 mod apk all players unlocked ios<br />
- dream league soccer 2023 hack ios online<br />
- dream league soccer 2023 mod apk latest version download ios<br />
- dream league soccer 2023 hack tool ios<br />
- dream league soccer 2023 mod apk obb download ios<br />
- dream league soccer 2023 hack ios app<br />
- dream league soccer 2023 mod apk unlimited money and diamond ios<br />
- dream league soccer 2023 mod apk data download ios<br />
- dream league soccer 2023 hack ios ipa<br />
- dream league soccer 2023 mod apk revdl download ios<br />
- dream league soccer 2023 hack ios cydia<br />
- dream league soccer 2023 mod apk rexdl download ios<br />
- dream league soccer 2023 hack ios tutuapp<br />
- dream league soccer 2023 mod apk with commentary download ios<br />
- dream league soccer 2023 hack ios panda helper<br />
- dream league soccer 2023 mod apk new update download ios<br />
- dream league soccer 2023 hack ios tweakbox<br />
- dream league soccer 2023 mod apk full version download ios<br />
- dream league soccer 2023 hack ios appvalley<br />
- dream league soccer 2023 mod apk unlocked everything download ios<br />
- dream league soccer 2023 hack ios no verification<br />
- dream league soccer 2023 mod apk unlimited player development ios<br />
- dream league soccer 2023 hack ios reddit<br />
- dream league soccer 2023 mod apk profile.dat download ios<br />
- dream league soccer 2023 hack ios game guardian<br />
- dream league soccer 2023 mod apk unlimited kits and logos ios<br />
- dream league soccer 2023 hack ios lucky patcher<br />
- dream league soccer 2023 mod apk all teams unlocked ios<br />
- dream league soccer 2023 hack ios no survey<br />
- dream league soccer 2023 mod apk real madrid team download ios<br />
- dream league soccer 2023 hack ios youtube<br />
- dream league soccer 2023 mod apk barcelona team download ios<br />
- dream league soccer 2023 hack ios generator<br />
- dream league soccer 2023 mod apk juventus team download ios<br />
- dream league soccer 2023 hack ios telegram<br />
- dream league soccer 2023 mod apk liverpool team download ios<br />
- dream league soccer 2023 hack ios discord<br />
- dream league soccer 2023 mod apk manchester united team download ios<br />
- dream league soccer 2023 hack ios facebook<br />
- dream league soccer 2023 mod apk psg team download ios<br />
- dream league soccer 2023 hack ios twitter<br />
- dream league soccer 2023 mod apk bayern munich team download ios</p>
- <h3>Step 1: Download the mod IPA file from the link below</h3>
- <p>The first thing you need to do is to download the mod IPA file from the link provided below. This is the file that contains the modded version of the game that has all the features that we have discussed above. The file is safe and virus-free, so you don't have to worry about any harm or damage to your device. The file size is about 400 MB, so make sure you have enough storage space on your device before downloading it.</p>
- <p><a href="^1^">Download Dream League Soccer 2023 Mod IPA</a></p>
- <h3>Step 2: Install the mod IPA file using Cydia Impactor or AltStore</h3>
- <p>The next thing you need to do is to install the mod IPA file on your device using either Cydia Impactor or AltStore. These are two tools that allow you to sideload apps on your iOS device without jailbreaking it. You can choose either one of them according to your preference and convenience.</p>
- <p>If you want to use Cydia Impactor, you need to download it from <a href="^2^">here</a> and install it on your computer. Then, connect your device to your computer using a USB cable and launch Cydia Impactor. Drag and drop the mod IPA file onto Cydia Impactor and enter your Apple ID and password when prompted. Wait for a few minutes until Cydia Impactor installs the app on your device.</p>
- <p>If you want to use AltStore, you need to download it from <a href="^3^">here</a> and install it on both your computer and your device. Then, connect your device to your computer using a USB cable and launch AltStore on both devices. Tap on the "My Apps" tab on AltStore and tap on the "+" icon on the top left corner. Browse and select the mod IPA file from your device and enter your Apple ID and password when prompted. Wait for a few minutes until AltStore installs the app on your device.</p>
- <h3>Step 3: Trust the developer profile in Settings > General > Device Management</h3>
- <p>The last thing you need to do before launching the game is to trust the developer profile that is associated with the app. This is necessary to avoid any errors or warnings that may prevent you from playing the game. To do this, go to Settings > General > Device Management on your device and find the developer profile that has your Apple ID as its name. Tap on it and tap on "Trust" to confirm. You can now go back to your home screen and launch the game.</p>
- <h3>Step 4: Launch the game and enjoy the mod features</h3>
- <p>Congratulations! You have successfully installed Dream League Soccer 2023 mod APK hack on your iOS device. You can now launch the game and enjoy all the mod features that we have discussed above. You can play without any limitations, customize your team and stadium, and dominate the game with unlimited resources and features. Have fun!</p>
- <h2>Conclusion</h2>
- <p>Dream League Soccer 2023 is one of the best soccer games on mobile devices, and it can be even better with a mod APK hack that gives you access to unlimited resources and features. In this article, we have shown you how to download and install Dream League Soccer 2023 mod APK hack on your iOS device using either Cydia Impactor or AltStore. We have also explained the features of this mod APK hack and how they can enhance your gameplay and make you unstoppable. We hope you found this article helpful and informative, and we hope you enjoy playing Dream League Soccer 2023 with this mod APK hack.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Dream League Soccer 2023 mod APK hack:</p>
- <ul>
- <li><b>Is this mod APK hack safe to use?</b></li>
- <p>Yes, this mod APK hack is safe to use, as it has been tested and verified by us. It does not contain any viruses or malware that can harm your device or steal your personal information. It also has anti-ban protection that will prevent you from getting banned by the game developers or the app store.</p>
- <li><b>Will this mod APK hack work on any iOS device?</b></li>
- <p>Yes, this mod APK hack will work on any iOS device that supports Dream League Soccer 2023, which is compatible with iOS 10.0 or later. You don't need to jailbreak your device to use this mod APK hack, as it can be installed using either Cydia Impactor or AltStore.</p>
- <li><b>Can I update this mod APK hack when a new version of Dream League Soccer 2023 is released?</b></li>
- <p>No, you cannot update this mod APK hack when a new version of Dream League Soccer 2023 is released, as it may cause errors or crashes in the game. You need to wait for a new version of this mod APK hack that is compatible with the latest version of Dream League Soccer 2023. You can check our website regularly for updates or subscribe to our newsletter to get notified when a new version of this mod APK hack is available.</p>
- <li><b>Can I play online matches with other players using this mod APK hack?</b></li>
- <p>Yes, you can play online matches with other players using this mod APK hack, as it does not affect your online connectivity or compatibility. However, you should be careful not to abuse the mod features or show them off to other players, as they may report you or complain about you. You should also respect the rules and etiquette of online gaming and avoid cheating or trolling other players.</p>
- <li><b>Can I use this mod APK hack with other mods or hacks for Dream League Soccer 2023?</b></li>
- <p>No, you cannot use this mod APK hack with other mods or hacks for Dream League Soccer 2023, as they may conflict with each other or cause errors or crashes in the game. You should only use one mod or hack at a time for Dream League Soccer 2023, and make sure it is compatible with the current version of the game.</p>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Drive Modern Buses in Realistic Cities with Bus Simulator 2023 - Download Now.md DELETED
@@ -1,119 +0,0 @@
- <br />
- <h1>Bus Simulator 2023: The Ultimate Bus Driving Game</h1>
- <p>Do you love driving buses? Do you want to experience what it's like to be a real bus driver in different cities and countries? Do you want to have fun with your friends in online multiplayer mode? If you answered yes to any of these questions, then you should definitely try Bus Simulator 2023, the most realistic and immersive bus simulation game ever made.</p>
- <p>Bus Simulator 2023 is a game that puts you in the driver's seat and lets you become a real bus driver. You can choose from a wide variety of modern city buses, coach buses, school buses, electric buses, hybrid buses, articulated buses, and more. You can also customize your bus as you wish, with paint, accessories, body parts, flags, decals, and more. You can drive your bus in detailed maps all over the world, from San Francisco to Shanghai, from Buenos Aires to Prague, from Dubai to St. Petersburg, and more. You can also enjoy different modes of gameplay, such as career mode, free-ride mode, and online multiplayer mode with friends.</p>
- <h2>download bus simulator 2023</h2><br /><p><b><b>Download</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://jinyurl.com/2uNLyI">https://jinyurl.com/2uNLyI</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about Bus Simulator 2023, including its features, how to play it, tips and tricks for it, and how to download it for free on your device. So buckle up and get ready for the ride of your life!</p>
- <h2>Features of Bus Simulator 2023</h2>
- <p>Bus Simulator 2023 is not just a game, it's a simulation of reality. It has many features that make it stand out from other bus games. Here are some of them:</p>
- <ul>
- <li><b>Realistic maps and buses from around the world</b>: Bus Simulator 2023 features realistic intracity and outside of city maps from different continents and countries. You can drive your bus in United States of America (San Francisco and Texas), South America (Buenos Aires), Europe (Germany, Spain, Prague, St. Petersburg), Dubai, Shanghai, and more. You can also choose from multiple diesel, hybrid, electric, articulated, coach, and school buses that have realistic interiors and exteriors.</li>
11
- <li><b>Career, free-ride and multiplayer modes</b>: Bus Simulator 2023 offers different modes of gameplay for different preferences. In career mode, you can start your own bus company and hire drivers for your buses. You can also create custom routes and schedules for your buses. In free-ride mode, you can drive your bus anywhere you want without any restrictions or objectives. You can explore the city at your own pace and enjoy the scenery. In multiplayer mode, you can join or create online sessions with your friends or other players from around the world. You can chat with them using live chat and cooperate with them in completing routes.</li>
12
- <li><b>Customizable buses and interiors</b>: Bus Simulator 2023 lets you customize your bus as you wish. You can change the paint color, add accessories, body parts, air conditioning, flags, decals, and more. You can also change the interior of your bus by adding seats, steering wheels, mirrors, dashboards, radios, and more. You can also adjust the seat position, the mirrors, the steering wheel, and the pedals to suit your driving style.</li>
13
- <li><b>Intelligent traffic system and weather conditions</b>: Bus Simulator 2023 features an intelligent traffic system that simulates real-life traffic situations. You will encounter different types of vehicles, such as cars, trucks, motorcycles, bicycles, and pedestrians. You will also have to follow the traffic rules, such as speed limits, traffic lights, signs, and signals. You will also have to deal with different weather conditions, such as sunny, cloudy, rainy, snowy, foggy, and stormy. You will have to adapt your driving to the changing road and visibility conditions.</li>
14
- <li><b>Bus company management system</b>: Bus Simulator 2023 allows you to create and manage your own bus company. You can buy and sell buses, hire and fire drivers, assign routes and schedules, monitor the performance and reputation of your company, and compete with other companies in the leaderboards. You can also join or create bus companies with your friends or other players online and cooperate with them in expanding your business.</li>
15
- </ul>
16
- <h2>How to Play Bus Simulator 2023</h2>
17
- <p>Bus Simulator 2023 is easy to play but hard to master. Here are some basic steps on how to play it:</p>
18
- <ol>
19
- <li><b>Choose your bus and route</b>: The first thing you need to do is to choose your bus and route. You can select from a variety of buses that have different specifications, such as speed, capacity, fuel consumption, maintenance cost, and more. You can also select from a variety of routes that have different lengths, difficulties, locations, and rewards. You can also create your own custom routes by choosing the starting point, the destination point, and the waypoints in between.</li>
20
- <li><b>Drive your bus and follow the traffic rules</b>: The next thing you need to do is to drive your bus and follow the traffic rules. You can use the keyboard or the mouse to control your bus. You can also use a gamepad or a steering wheel for a more realistic experience. You can adjust the camera angle by using the mouse wheel or the arrow keys. You can also switch between different camera views by pressing the C key. You can use the indicators by pressing the Q and E keys, the horn by pressing the H key, the headlights by pressing the L key, the wipers by pressing the W key, and the emergency brake by pressing the spacebar. You can also use the map and GPS to navigate your route by pressing the M key.</li>
21
- <li><b>Pick up and drop off passengers</b>: The main objective of Bus Simulator 2023 is to pick up and drop off passengers at designated bus stops. You can see the bus stops on your map and GPS. You can also see the number of passengers waiting at each stop by hovering over them with your mouse cursor. You need to stop your bus at the right position and open the doors by pressing the O key. You need to wait for all passengers to board or exit your bus before closing the doors by pressing the O key again. You need to collect fares from passengers by pressing the F key. You need to be careful not to overcharge or undercharge them as this will affect your reputation.</li>
22
- <li><b>Earn money and reputation points</b>: As you complete your routes, you will earn money and reputation points. Money can be used to buy new buses or upgrade existing ones. Reputation points can be used to unlock new routes or access new features. You can also earn bonuses for driving safely, punctually, comfortably, and environmentally friendly. You can also lose money and reputation points for driving recklessly, late, uncomfortably, or environmentally unfriendly. You can also lose money and reputation points for damaging your bus or causing accidents. You can check your balance and reputation level by pressing the B key.</li>
23
- </ol>
24
- <h2>Tips and Tricks for Bus Simulator 2023</h2>
25
- <p>Bus Simulator 2023 is a challenging game that requires skill and strategy. Here are some tips and tricks that can help you improve your performance and enjoy the game more:</p>
26
- <ul>
27
- <li><b>Use the map and GPS to navigate</b>: The map and GPS are your best friends in Bus Simulator 2023. They can help you find your way around the city and avoid getting lost. You can see the bus stops, the traffic lights, the speed limits, and the road conditions on your map and GPS. You can also see the distance and time remaining for your route. You can zoom in and out of the map by using the mouse wheel or the plus and minus keys. You can also move the map by dragging it with your mouse cursor. You can toggle the map and GPS on and off by pressing the M key.</li>
28
- <li><b>Adjust the camera and controls to your preference</b>: Bus Simulator 2023 allows you to adjust the camera angle and the controls to your preference. You can change the camera angle by using the mouse wheel or the arrow keys. You can also switch between different camera views by pressing the C key. You can choose from cockpit view, front view, rear view, side view, top view, or free view. You can also adjust the sensitivity and inversion of the mouse and keyboard controls in the settings menu. You can also use a gamepad or a steering wheel for a more realistic experience.</li>
29
- <li><b>Follow the speed limit and avoid collisions</b>: One of the most important things in Bus Simulator 2023 is to follow the speed limit and avoid collisions. The speed limit varies depending on the road type, the weather condition, and the traffic situation. You can see the speed limit on your dashboard or on your GPS. You can also see the speed limit signs on the road. You need to slow down when approaching curves, intersections, bus stops, or traffic lights. You also need to avoid colliding with other vehicles, pedestrians, or objects as this will damage your bus and cost you money and reputation points.</li>
30
- <li><b>Use the indicators and horn to communicate with other drivers</b>: Another important thing in Bus Simulator 2023 is to use the indicators and horn to communicate with other drivers. You need to use the indicators by pressing the Q and E keys when turning left or right, changing lanes, or merging into traffic. This will signal your intention to other drivers and prevent accidents. You also need to use the horn by pressing the H key when overtaking, warning, or greeting other drivers. This will alert them of your presence and avoid collisions.</li>
31
- <li><b>Check the weather forecast and plan accordingly</b>: The weather condition in Bus Simulator 2023 affects your driving experience. The weather condition changes dynamically according to real-time data. You can check the weather forecast by pressing the W key. You can see the current temperature, humidity, wind speed, and precipitation. You can also see the forecast for the next hours and days. The weather condition affects the road condition, the visibility, and the traffic behavior. You need to plan your route and driving strategy accordingly. For example, you need to drive more carefully when it's raining or snowing, as the road will be slippery and the visibility will be low. You also need to use the wipers by pressing the W key to clear your windshield. You also need to use the headlights by pressing the L key when it's dark or foggy.</li>
32
- </ul>
33
- <h2>Download Bus Simulator 2023 for Free</h2>
34
- <p>If you are interested in playing Bus Simulator 2023, you will be happy to know that you can download it for free on your device. Bus Simulator 2023 is available for Android, iOS, and Windows devices. Here are the steps on how to download it:</p>
35
- <ol>
36
- <li><b>For Android devices</b>: Go to the Google Play Store and search for Bus Simulator 2023. Tap on the Install button and wait for the download to finish. Alternatively, you can scan this QR code with your device's camera to go directly to the download page:</li>
37
- </ol>
38
- <p><img src="https://www.qrcode-monkey.com/img/default-preview-qr.svg" alt="QR code for Bus Simulator 2023 on Android" width="200" height="200"></p>
- <ol start="2">
89
- <li><b>For iOS devices</b>: Go to the App Store and search for Bus Simulator 2023. Tap on the Get button and wait for the download to finish. Alternatively, you can scan this QR code with your device's camera to go directly to the download page:</li>
90
- </ol>
91
- <p><img src="https://www.qrcode-monkey.com/img/default-preview-qr.svg" alt="QR code for Bus Simulator 2023 on iOS" width="200" height="200"></p>
92
- <ol start="3">
93
- <li><b>For Windows devices</b>: Go to the Microsoft Store and search for Bus Simulator 2023. Click on the Get button and wait for the download to finish. Alternatively, you can scan this QR code with your device's camera to go directly to the download page:</li>
94
- </ol>
95
- <p><img src="https://www.qrcode-monkey.com/img/default-preview-qr.svg" alt="QR code for Bus Simulator 2023 on Windows" width="200" height="200"></p>
96
- <ol start="4">
97
- <li><b>How to install and run Bus Simulator 2023 on your device</b>: After downloading Bus Simulator 2023 on your device, you need to install it and run it. To install it, just follow the instructions on your screen. To run it, just tap or click on the Bus Simulator 2023 icon on your home screen or menu.</li>
98
- <li><b>How to access the online multiplayer mode and chat with friends</b>: To access the online multiplayer mode and chat with friends, you need to have an internet connection and a valid account. You can create an account by using your email address or your Facebook account. To join or create an online session, just go to the multiplayer menu and select an option. You can chat with other players by using the live chat feature in the game.</li>
99
- </ol>
100
- <h1>Conclusion</h1>
101
- <p>Bus Simulator 2023 is a game that lets you become a real bus driver and experience what it's like to drive buses in different cities and countries. You can choose from a wide variety of buses, customize them as you wish, drive them in realistic maps, pick up and drop off passengers, earn money and reputation points, manage your own bus company, and have fun with your friends in online multiplayer mode.</p>
102
- <p>Bus Simulator 2023 is a game that is suitable for all ages and preferences. Whether you are a casual gamer or a hardcore gamer, whether you are a bus enthusiast or a bus novice, whether you are looking for a relaxing game or a challenging game, you will find something that suits you in Bus Simulator 2023.</p>
103
- <p>So what are you waiting for? Download Bus Simulator 2023 today and enjoy the best bus driving game ever!</p>
104
- <h2>Frequently Asked Questions</h2>
105
- <p>Here are some frequently asked questions about Bus Simulator 2023:</p>
106
- <ul>
107
- <li><b>Q: Is Bus Simulator 2023 free?</b></li>
108
- <li>A: Yes, Bus Simulator 2023 is free to download and play on Android, iOS, and Windows devices.</li>
109
- <li><b>Q: How realistic is Bus Simulator 2023?</b></li>
110
- <li>A: Bus Simulator 2023 is very realistic in terms of graphics, physics, sound effects, traffic system, weather system, and bus company management system. It also features realistic maps and buses from around the world.</li>
111
- <li><b>Q: How many buses and maps are there in Bus Simulator 2023?</b></li>
112
- <li>A: Bus Simulator 2023 features over 50 buses and over 20 maps from different continents and countries. You can also create your own custom routes by choosing the starting point, the destination point, and the waypoints in between.</li>
113
- <li><b>Q: How can I customize my bus in Bus Simulator 2023?</b></li>
114
- <li>A: You can customize your bus by changing the paint color, adding accessories, body parts, air conditioning, flags, decals, and more. You can also change the interior of your bus by adding seats, steering wheels, mirrors, dashboards, radios, and more. You can also adjust the seat position, the mirrors, the steering wheel, and the pedals to suit your driving style.</li>
115
- <li><b>Q: How can I play with my friends in Bus Simulator 2023?</b></li>
116
- <li>A: You can play with your friends in online multiplayer mode in Bus Simulator 2023. You need to have an internet connection and a valid account. You can create an account by using your email address or your Facebook account. To join or create an online session, just go to the multiplayer menu and select an option. You can chat with your friends by using the live chat feature in the game.</li>
117
- </ul></p> 401be4b1e0<br />
118
- <br />
119
- <br />
spaces/52Hz/CMFNet_dehazing/model/block.py DELETED
@@ -1,146 +0,0 @@
- import torch
- import torch.nn as nn
-
-
- ##########################################################################
- def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
-     layer = nn.Conv2d(in_channels, out_channels, kernel_size, padding=(kernel_size // 2), bias=bias, stride=stride)
-     return layer
-
-
- def conv3x3(in_chn, out_chn, bias=True):
-     layer = nn.Conv2d(in_chn, out_chn, kernel_size=3, stride=1, padding=1, bias=bias)
-     return layer
-
-
- def conv_down(in_chn, out_chn, bias=False):
-     layer = nn.Conv2d(in_chn, out_chn, kernel_size=4, stride=2, padding=1, bias=bias)
-     return layer
-
-
- ##########################################################################
- ## Supervised Attention Module (SAM)
- class SAM(nn.Module):
-     def __init__(self, n_feat, kernel_size, bias):
-         super(SAM, self).__init__()
-         self.conv1 = conv(n_feat, n_feat, kernel_size, bias=bias)
-         self.conv2 = conv(n_feat, 3, kernel_size, bias=bias)
-         self.conv3 = conv(3, n_feat, kernel_size, bias=bias)
-
-     def forward(self, x, x_img):
-         x1 = self.conv1(x)
-         img = self.conv2(x) + x_img
-         x2 = torch.sigmoid(self.conv3(img))
-         x1 = x1 * x2
-         x1 = x1 + x
-         return x1, img
-
-
- ##########################################################################
- ## Spatial Attention
- class SALayer(nn.Module):
-     def __init__(self, kernel_size=7):
-         super(SALayer, self).__init__()
-         self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)
-         self.sigmoid = nn.Sigmoid()
-
-     def forward(self, x):
-         avg_out = torch.mean(x, dim=1, keepdim=True)
-         max_out, _ = torch.max(x, dim=1, keepdim=True)
-         y = torch.cat([avg_out, max_out], dim=1)
-         y = self.conv1(y)
-         y = self.sigmoid(y)
-         return x * y
-
-
- # Spatial Attention Block (SAB)
- class SAB(nn.Module):
-     def __init__(self, n_feat, kernel_size, reduction, bias, act):
-         super(SAB, self).__init__()
-         modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
-         self.body = nn.Sequential(*modules_body)
-         self.SA = SALayer(kernel_size=7)
-
-     def forward(self, x):
-         res = self.body(x)
-         res = self.SA(res)
-         res += x
-         return res
-
-
- ##########################################################################
- ## Pixel Attention
- class PALayer(nn.Module):
-     def __init__(self, channel, reduction=16, bias=False):
-         super(PALayer, self).__init__()
-         self.pa = nn.Sequential(
-             nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
-             nn.ReLU(inplace=True),
-             nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),  # channel <-> 1
-             nn.Sigmoid()
-         )
-
-     def forward(self, x):
-         y = self.pa(x)
-         return x * y
-
-
- ## Pixel Attention Block (PAB)
- class PAB(nn.Module):
-     def __init__(self, n_feat, kernel_size, reduction, bias, act):
-         super(PAB, self).__init__()
-         modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
-         self.PA = PALayer(n_feat, reduction, bias=bias)
-         self.body = nn.Sequential(*modules_body)
-
-     def forward(self, x):
-         res = self.body(x)
-         res = self.PA(res)
-         res += x
-         return res
-
-
- ##########################################################################
- ## Channel Attention Layer
- class CALayer(nn.Module):
-     def __init__(self, channel, reduction=16, bias=False):
-         super(CALayer, self).__init__()
-         # global average pooling: feature --> point
-         self.avg_pool = nn.AdaptiveAvgPool2d(1)
-         # feature channel downscale and upscale --> channel weight
-         self.conv_du = nn.Sequential(
-             nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
-             nn.ReLU(inplace=True),
-             nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
-             nn.Sigmoid()
-         )
-
-     def forward(self, x):
-         y = self.avg_pool(x)
-         y = self.conv_du(y)
-         return x * y
-
-
- ## Channel Attention Block (CAB)
- class CAB(nn.Module):
-     def __init__(self, n_feat, kernel_size, reduction, bias, act):
-         super(CAB, self).__init__()
-         modules_body = [conv(n_feat, n_feat, kernel_size, bias=bias), act, conv(n_feat, n_feat, kernel_size, bias=bias)]
-
-         self.CA = CALayer(n_feat, reduction, bias=bias)
-         self.body = nn.Sequential(*modules_body)
-
-     def forward(self, x):
-         res = self.body(x)
-         res = self.CA(res)
-         res += x
-         return res
-
-
- if __name__ == "__main__":
-     import time
-     from thop import profile
-     # layer = CAB(64, 3, 4, False, nn.PReLU())
-     layer = PAB(64, 3, 4, False, nn.PReLU())
-     # layer = SAB(64, 3, 4, False, nn.PReLU())
-     for idx, m in enumerate(layer.modules()):
-         print(idx, "-", m)
-     s = time.time()
-
-     rgb = torch.ones(1, 64, 256, 256, dtype=torch.float, requires_grad=False)
-     out = layer(rgb)
-     flops, params = profile(layer, inputs=(rgb,))
-     print('parameters:', params)
-     print('flops', flops)
-     print('time: {:.4f}ms'.format((time.time() - s) * 1000))
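
The attention blocks above are drop-in residual units, and SAM couples them to an image-space side output. A minimal sketch of how they typically fit together (not from this repository; the two-block stage and the sizes are illustrative assumptions, and it presumes the classes above are in scope):

```python
import torch
import torch.nn as nn

# Hypothetical wiring: a small stage of channel attention blocks feeds SAM,
# which returns refined features plus an intermediate restored image that
# can be used for deep supervision.
n_feat = 64
stage = nn.Sequential(CAB(n_feat, 3, 4, False, nn.PReLU()),
                      CAB(n_feat, 3, 4, False, nn.PReLU()))
sam = SAM(n_feat, kernel_size=3, bias=False)

x_img = torch.randn(1, 3, 64, 64)             # degraded RGB input
feats = torch.randn(1, n_feat, 64, 64)        # features from a shallow conv head
refined, side_img = sam(stage(feats), x_img)  # (1, 64, 64, 64) and (1, 3, 64, 64)
```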
spaces/801artistry/RVC801/infer/lib/uvr5_pack/lib_v5/nets_123812KB.py DELETED
@@ -1,122 +0,0 @@
- import torch
- import torch.nn.functional as F
- from torch import nn
-
- from . import layers_123821KB as layers
-
-
- class BaseASPPNet(nn.Module):
-     def __init__(self, nin, ch, dilations=(4, 8, 16)):
-         super(BaseASPPNet, self).__init__()
-         self.enc1 = layers.Encoder(nin, ch, 3, 2, 1)
-         self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1)
-         self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1)
-         self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1)
-
-         self.aspp = layers.ASPPModule(ch * 8, ch * 16, dilations)
-
-         self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1)
-         self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1)
-         self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1)
-         self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1)
-
-     def __call__(self, x):
-         h, e1 = self.enc1(x)
-         h, e2 = self.enc2(h)
-         h, e3 = self.enc3(h)
-         h, e4 = self.enc4(h)
-
-         h = self.aspp(h)
-
-         h = self.dec4(h, e4)
-         h = self.dec3(h, e3)
-         h = self.dec2(h, e2)
-         h = self.dec1(h, e1)
-
-         return h
-
-
- class CascadedASPPNet(nn.Module):
-     def __init__(self, n_fft):
-         super(CascadedASPPNet, self).__init__()
-         self.stg1_low_band_net = BaseASPPNet(2, 32)
-         self.stg1_high_band_net = BaseASPPNet(2, 32)
-
-         self.stg2_bridge = layers.Conv2DBNActiv(34, 16, 1, 1, 0)
-         self.stg2_full_band_net = BaseASPPNet(16, 32)
-
-         self.stg3_bridge = layers.Conv2DBNActiv(66, 32, 1, 1, 0)
-         self.stg3_full_band_net = BaseASPPNet(32, 64)
-
-         self.out = nn.Conv2d(64, 2, 1, bias=False)
-         self.aux1_out = nn.Conv2d(32, 2, 1, bias=False)
-         self.aux2_out = nn.Conv2d(32, 2, 1, bias=False)
-
-         self.max_bin = n_fft // 2
-         self.output_bin = n_fft // 2 + 1
-
-         self.offset = 128
-
-     def forward(self, x, aggressiveness=None):
-         mix = x.detach()
-         x = x.clone()
-
-         x = x[:, :, : self.max_bin]
-
-         bandw = x.size()[2] // 2
-         aux1 = torch.cat(
-             [
-                 self.stg1_low_band_net(x[:, :, :bandw]),
-                 self.stg1_high_band_net(x[:, :, bandw:]),
-             ],
-             dim=2,
-         )
-
-         h = torch.cat([x, aux1], dim=1)
-         aux2 = self.stg2_full_band_net(self.stg2_bridge(h))
-
-         h = torch.cat([x, aux1, aux2], dim=1)
-         h = self.stg3_full_band_net(self.stg3_bridge(h))
-
-         mask = torch.sigmoid(self.out(h))
-         mask = F.pad(
-             input=mask,
-             pad=(0, 0, 0, self.output_bin - mask.size()[2]),
-             mode="replicate",
-         )
-
-         if self.training:
-             aux1 = torch.sigmoid(self.aux1_out(aux1))
-             aux1 = F.pad(
-                 input=aux1,
-                 pad=(0, 0, 0, self.output_bin - aux1.size()[2]),
-                 mode="replicate",
-             )
-             aux2 = torch.sigmoid(self.aux2_out(aux2))
-             aux2 = F.pad(
-                 input=aux2,
-                 pad=(0, 0, 0, self.output_bin - aux2.size()[2]),
-                 mode="replicate",
-             )
-             return mask * mix, aux1 * mix, aux2 * mix
-         else:
-             if aggressiveness:
-                 mask[:, :, : aggressiveness["split_bin"]] = torch.pow(
-                     mask[:, :, : aggressiveness["split_bin"]],
-                     1 + aggressiveness["value"] / 3,
-                 )
-                 mask[:, :, aggressiveness["split_bin"] :] = torch.pow(
-                     mask[:, :, aggressiveness["split_bin"] :],
-                     1 + aggressiveness["value"],
-                 )
-
-             return mask * mix
-
-     def predict(self, x_mag, aggressiveness=None):
-         h = self.forward(x_mag, aggressiveness)
-
-         if self.offset > 0:
-             h = h[:, :, :, self.offset : -self.offset]
-             assert h.size()[3] > 0
-
-         return h
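
A plausible inference sketch for this cascaded network, with shapes inferred from `forward` (the `(batch, 2, n_fft // 2 + 1, frames)` mixture layout and the aggressiveness values are assumptions, not documented API):

```python
import torch

# Assumed spectrogram layout: (batch, 2 channels, n_fft // 2 + 1 bins, frames).
n_fft = 2048
model = CascadedASPPNet(n_fft).eval()
x_mag = torch.rand(1, 2, n_fft // 2 + 1, 512)

with torch.no_grad():
    # aggressiveness sharpens the sigmoid mask: bins at or above split_bin get
    # the full exponent 1 + value, bins below get the milder 1 + value / 3.
    out = model.predict(x_mag, aggressiveness={"split_bin": 256, "value": 0.3})

print(out.shape)  # time axis trimmed by model.offset (128) frames on each side
```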
spaces/801artistry/RVC801/julius/lowpass.py DELETED
@@ -1,181 +0,0 @@
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
- # Author: adefossez, 2020
- """
- FIR windowed sinc lowpass filters.
- """
-
- import math
- from typing import Sequence, Optional
-
- import torch
- from torch.nn import functional as F
-
- from .core import sinc
- from .fftconv import fft_conv1d
- from .utils import simple_repr
-
-
- class LowPassFilters(torch.nn.Module):
-     """
-     Bank of low pass filters. Note that a high pass or band pass filter can easily
-     be implemented by subtracting a same signal processed with low pass filters with different
-     frequencies (see `julius.bands.SplitBands` for instance).
-     This uses a windowed sinc filter, very similar to the one used in
-     `julius.resample`. However, because we do not change the sample rate here,
-     this filter can be much more efficiently implemented using the FFT convolution from
-     `julius.fftconv`.
-
-     Args:
-         cutoffs (list[float]): list of cutoff frequencies, in [0, 0.5] expressed as `f/f_s` where
-             f_s is the samplerate and `f` is the cutoff frequency.
-             The upper limit is 0.5, because a signal sampled at `f_s` contains only
-             frequencies under `f_s / 2`.
-         stride (int): how much to decimate the output. Keep in mind that decimation
-             of the output is only acceptable if the cutoff frequency is under `1/ (2 * stride)`
-             of the original sampling rate.
-         pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`,
-             the output will have the same length as the input.
-         zeros (float): Number of zero crossings to keep.
-             Controls the receptive field of the Finite Impulse Response filter.
-             For lowpass filters with low cutoff frequency, e.g. 40Hz at 44.1kHz,
-             it is a bad idea to set this to a high value.
-             The default of 8 is likely appropriate for most uses. Lower values
-             will result in a faster filter, but with a slower attenuation around the
-             cutoff frequency.
-         fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions.
-             If False, uses PyTorch convolutions. If None, either one will be chosen automatically
-             depending on the effective filter size.
-
-     .. warning::
-         All the filters will use the same filter size, aligned on the lowest
-         frequency provided. If you combine a lot of filters with very diverse frequencies, it might
-         be more efficient to split them over multiple modules with similar frequencies.
-
-     .. note::
-         A lowpass with a cutoff frequency of 0 is defined as the null function
-         by convention here. This allows for a highpass with a cutoff of 0 to
-         be equal to identity, as defined in `julius.filters.HighPassFilters`.
-
-     Shape:
-
-         - Input: `[*, T]`
-         - Output: `[F, *, T']`, with `T'=T` if `pad` is True and `stride` is 1, and
-           `F` is the number of cutoff frequencies.
-
-     >>> lowpass = LowPassFilters([1/4])
-     >>> x = torch.randn(4, 12, 21, 1024)
-     >>> list(lowpass(x).shape)
-     [1, 4, 12, 21, 1024]
-     """
-
-     def __init__(self, cutoffs: Sequence[float], stride: int = 1, pad: bool = True,
-                  zeros: float = 8, fft: Optional[bool] = None):
-         super().__init__()
-         self.cutoffs = list(cutoffs)
-         if min(self.cutoffs) < 0:
-             raise ValueError("Minimum cutoff must be larger than zero.")
-         if max(self.cutoffs) > 0.5:
-             raise ValueError("A cutoff above 0.5 does not make sense.")
-         self.stride = stride
-         self.pad = pad
-         self.zeros = zeros
-         self.half_size = int(zeros / min([c for c in self.cutoffs if c > 0]) / 2)
-         if fft is None:
-             fft = self.half_size > 32
-         self.fft = fft
-         window = torch.hann_window(2 * self.half_size + 1, periodic=False)
-         time = torch.arange(-self.half_size, self.half_size + 1)
-         filters = []
-         for cutoff in cutoffs:
-             if cutoff == 0:
-                 filter_ = torch.zeros_like(time)
-             else:
-                 filter_ = 2 * cutoff * window * sinc(2 * cutoff * math.pi * time)
-                 # Normalize filter to have sum = 1, otherwise we will have a small leakage
-                 # of the constant component in the input signal.
-                 filter_ /= filter_.sum()
-             filters.append(filter_)
-         self.register_buffer("filters", torch.stack(filters)[:, None])
-
-     def forward(self, input):
-         shape = list(input.shape)
-         input = input.view(-1, 1, shape[-1])
-         if self.pad:
-             input = F.pad(input, (self.half_size, self.half_size), mode='replicate')
-         if self.fft:
-             out = fft_conv1d(input, self.filters, stride=self.stride)
-         else:
-             out = F.conv1d(input, self.filters, stride=self.stride)
-         shape.insert(0, len(self.cutoffs))
-         shape[-1] = out.shape[-1]
-         return out.permute(1, 0, 2).reshape(shape)
-
-     def __repr__(self):
-         return simple_repr(self)
-
-
- class LowPassFilter(torch.nn.Module):
-     """
-     Same as `LowPassFilters` but applies a single low pass filter.
-
-     Shape:
-
-         - Input: `[*, T]`
-         - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1.
-
-     >>> lowpass = LowPassFilter(1/4, stride=2)
-     >>> x = torch.randn(4, 124)
-     >>> list(lowpass(x).shape)
-     [4, 62]
-     """
-
-     def __init__(self, cutoff: float, stride: int = 1, pad: bool = True,
-                  zeros: float = 8, fft: Optional[bool] = None):
-         super().__init__()
-         self._lowpasses = LowPassFilters([cutoff], stride, pad, zeros, fft)
-
-     @property
-     def cutoff(self):
-         return self._lowpasses.cutoffs[0]
-
-     @property
-     def stride(self):
-         return self._lowpasses.stride
-
-     @property
-     def pad(self):
-         return self._lowpasses.pad
-
-     @property
-     def zeros(self):
-         return self._lowpasses.zeros
-
-     @property
-     def fft(self):
-         return self._lowpasses.fft
-
-     def forward(self, input):
-         return self._lowpasses(input)[0]
-
-     def __repr__(self):
-         return simple_repr(self)
-
-
- def lowpass_filters(input: torch.Tensor, cutoffs: Sequence[float],
-                     stride: int = 1, pad: bool = True,
-                     zeros: float = 8, fft: Optional[bool] = None):
-     """
-     Functional version of `LowPassFilters`, refer to this class for more information.
-     """
-     return LowPassFilters(cutoffs, stride, pad, zeros, fft).to(input)(input)
-
-
- def lowpass_filter(input: torch.Tensor, cutoff: float,
-                    stride: int = 1, pad: bool = True,
-                    zeros: float = 8, fft: Optional[bool] = None):
-     """
-     Same as `lowpass_filters` but with a single cutoff frequency.
-     Output will not have a dimension inserted in the front.
-     """
-     return lowpass_filters(input, [cutoff], stride, pad, zeros, fft)[0]
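
The docstring's remark that a high pass filter follows by subtraction can be made concrete with a short sketch (ours, not part of julius); with `pad=True` and `stride=1` the output length matches the input, so the subtraction is well defined:

```python
import torch

# High pass by subtraction: the FIR kernel sums to 1, so x - lowpass(x)
# removes the band below the cutoff and keeps everything above it.
lowpass = LowPassFilter(0.1)   # cutoff as f / f_s, must lie in [0, 0.5]
x = torch.randn(4, 1024)
highpassed = x - lowpass(x)    # same shape as x
```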
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/losses/stft_loss.py DELETED
@@ -1,153 +0,0 @@
- # -*- coding: utf-8 -*-
-
- # Copyright 2019 Tomoki Hayashi
- #  MIT License (https://opensource.org/licenses/MIT)
-
- """STFT-based Loss modules."""
-
- import torch
- import torch.nn.functional as F
-
-
- def stft(x, fft_size, hop_size, win_length, window):
-     """Perform STFT and convert to magnitude spectrogram.
-
-     Args:
-         x (Tensor): Input signal tensor (B, T).
-         fft_size (int): FFT size.
-         hop_size (int): Hop size.
-         win_length (int): Window length.
-         window (Tensor): Window function tensor.
-
-     Returns:
-         Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
-
-     """
-     x_stft = torch.stft(x, fft_size, hop_size, win_length, window)
-     real = x_stft[..., 0]
-     imag = x_stft[..., 1]
-
-     # NOTE(kan-bayashi): clamp is needed to avoid nan or inf
-     return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
-
-
- class SpectralConvergengeLoss(torch.nn.Module):
-     """Spectral convergence loss module."""
-
-     def __init__(self):
-         """Initialize spectral convergence loss module."""
-         super(SpectralConvergengeLoss, self).__init__()
-
-     def forward(self, x_mag, y_mag):
-         """Calculate forward propagation.
-
-         Args:
-             x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
-             y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
-
-         Returns:
-             Tensor: Spectral convergence loss value.
-
-         """
-         return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
-
-
- class LogSTFTMagnitudeLoss(torch.nn.Module):
-     """Log STFT magnitude loss module."""
-
-     def __init__(self):
-         """Initialize log STFT magnitude loss module."""
-         super(LogSTFTMagnitudeLoss, self).__init__()
-
-     def forward(self, x_mag, y_mag):
-         """Calculate forward propagation.
-
-         Args:
-             x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
-             y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
-
-         Returns:
-             Tensor: Log STFT magnitude loss value.
-
-         """
-         return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
-
-
- class STFTLoss(torch.nn.Module):
-     """STFT loss module."""
-
-     def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"):
-         """Initialize STFT loss module."""
-         super(STFTLoss, self).__init__()
-         self.fft_size = fft_size
-         self.shift_size = shift_size
-         self.win_length = win_length
-         self.window = getattr(torch, window)(win_length)
-         self.spectral_convergenge_loss = SpectralConvergengeLoss()
-         self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
-
-     def forward(self, x, y):
-         """Calculate forward propagation.
-
-         Args:
-             x (Tensor): Predicted signal (B, T).
-             y (Tensor): Groundtruth signal (B, T).
-
-         Returns:
-             Tensor: Spectral convergence loss value.
-             Tensor: Log STFT magnitude loss value.
-
-         """
-         x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
-         y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
-         sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
-         mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
-
-         return sc_loss, mag_loss
-
-
- class MultiResolutionSTFTLoss(torch.nn.Module):
-     """Multi resolution STFT loss module."""
-
-     def __init__(self,
-                  fft_sizes=[1024, 2048, 512],
-                  hop_sizes=[120, 240, 50],
-                  win_lengths=[600, 1200, 240],
-                  window="hann_window"):
-         """Initialize Multi resolution STFT loss module.
-
-         Args:
-             fft_sizes (list): List of FFT sizes.
-             hop_sizes (list): List of hop sizes.
-             win_lengths (list): List of window lengths.
-             window (str): Window function type.
-
-         """
-         super(MultiResolutionSTFTLoss, self).__init__()
-         assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
-         self.stft_losses = torch.nn.ModuleList()
-         for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
-             self.stft_losses += [STFTLoss(fs, ss, wl, window)]
-
-     def forward(self, x, y):
-         """Calculate forward propagation.
-
-         Args:
-             x (Tensor): Predicted signal (B, T).
-             y (Tensor): Groundtruth signal (B, T).
-
-         Returns:
-             Tensor: Multi resolution spectral convergence loss value.
-             Tensor: Multi resolution log STFT magnitude loss value.
-
-         """
-         sc_loss = 0.0
-         mag_loss = 0.0
-         for f in self.stft_losses:
-             sc_l, mag_l = f(x, y)
-             sc_loss += sc_l
-             mag_loss += mag_l
-         sc_loss /= len(self.stft_losses)
-         mag_loss /= len(self.stft_losses)
-
-         return sc_loss, mag_loss
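
A minimal training-step sketch for the multi-resolution loss (our illustration; it assumes a PyTorch version where `torch.stft` still returns stacked real and imaginary parts, as the `stft` helper above expects):

```python
import torch

criterion = MultiResolutionSTFTLoss()  # default 1024 / 2048 / 512 resolutions
y = torch.randn(2, 16000)              # ground-truth waveform (B, T)
y_hat = torch.randn(2, 16000, requires_grad=True)  # stand-in for a vocoder output

sc_loss, mag_loss = criterion(y_hat, y)
loss = sc_loss + mag_loss              # commonly summed; weighting schemes vary
loss.backward()
```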
spaces/AIGC-Audio/Make_An_Audio/ldm/models/diffusion/ddpm_audio.py DELETED
@@ -1,1262 +0,0 @@
- """
- wild mixture of
- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
- https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
- https://github.com/CompVis/taming-transformers
- -- merci
- """
- import os
- import torch
- import torch.nn as nn
- import numpy as np
- import pytorch_lightning as pl
- from torch.optim.lr_scheduler import LambdaLR
- from einops import rearrange, repeat
- from contextlib import contextmanager
- from functools import partial
- from tqdm import tqdm
- from torchvision.utils import make_grid
- from pytorch_lightning.utilities.distributed import rank_zero_only
-
- from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
- from ldm.modules.ema import LitEma
- from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
- from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
- from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
- from ldm.models.diffusion.ddim import DDIMSampler
- from ldm.models.diffusion.ddpm import DDPM, disabled_train
- from omegaconf import ListConfig
-
- __conditioning_keys__ = {'concat': 'c_concat',
-                          'crossattn': 'c_crossattn',
-                          'adm': 'y'}
-
-
- class LatentDiffusion_audio(DDPM):
-     """main class"""
-     def __init__(self,
-                  first_stage_config,
-                  cond_stage_config,
-                  num_timesteps_cond=None,
-                  mel_dim=80,
-                  mel_length=848,
-                  cond_stage_key="image",
-                  cond_stage_trainable=False,
-                  concat_mode=True,
-                  cond_stage_forward=None,
-                  conditioning_key=None,
-                  scale_factor=1.0,
-                  scale_by_std=False,
-                  *args, **kwargs):
-         self.num_timesteps_cond = default(num_timesteps_cond, 1)
-         self.scale_by_std = scale_by_std
-         assert self.num_timesteps_cond <= kwargs['timesteps']
-         # for backwards compatibility after implementation of DiffusionWrapper
-         if conditioning_key is None:
-             conditioning_key = 'concat' if concat_mode else 'crossattn'
-         if cond_stage_config == '__is_unconditional__':
-             conditioning_key = None
-         ckpt_path = kwargs.pop("ckpt_path", None)
-         ignore_keys = kwargs.pop("ignore_keys", [])
-         super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
-         self.concat_mode = concat_mode
-         self.mel_dim = mel_dim
-         self.mel_length = mel_length
-         self.cond_stage_trainable = cond_stage_trainable
-         self.cond_stage_key = cond_stage_key
-         try:
-             self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-         except:
-             self.num_downs = 0
-         if not scale_by_std:
-             self.scale_factor = scale_factor
-         else:
-             self.register_buffer('scale_factor', torch.tensor(scale_factor))
-         self.instantiate_first_stage(first_stage_config)
-         self.instantiate_cond_stage(cond_stage_config)
-         self.cond_stage_forward = cond_stage_forward
-         self.clip_denoised = False
-         self.bbox_tokenizer = None
-
-         self.restarted_from_ckpt = False
-         if ckpt_path is not None:
-             self.init_from_ckpt(ckpt_path, ignore_keys)
-             self.restarted_from_ckpt = True
-
-     def make_cond_schedule(self, ):
-         self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
-         ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
-         self.cond_ids[:self.num_timesteps_cond] = ids
-
-     @rank_zero_only
-     @torch.no_grad()
-     def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
-         # only for very first batch
-         if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
-             assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
-             # set rescale weight to 1./std of encodings
-             print("### USING STD-RESCALING ###")
-             x = super().get_input(batch, self.first_stage_key)
-             x = x.to(self.device)
-             encoder_posterior = self.encode_first_stage(x)
-             z = self.get_first_stage_encoding(encoder_posterior).detach()
-             del self.scale_factor
-             self.register_buffer('scale_factor', 1. / z.flatten().std())
-             print(f"setting self.scale_factor to {self.scale_factor}")
-             print("### USING STD-RESCALING ###")
-
-     def register_schedule(self,
-                           given_betas=None, beta_schedule="linear", timesteps=1000,
-                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
-         super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
-         self.shorten_cond_schedule = self.num_timesteps_cond > 1
-         if self.shorten_cond_schedule:
-             self.make_cond_schedule()
-
-     def instantiate_first_stage(self, config):
-         model = instantiate_from_config(config)
-         self.first_stage_model = model.eval()
-         self.first_stage_model.train = disabled_train
-         for param in self.first_stage_model.parameters():
-             param.requires_grad = False
-
-     def instantiate_cond_stage(self, config):
-         if not self.cond_stage_trainable:
-             if config == "__is_first_stage__":
-                 print("Using first stage also as cond stage.")
-                 self.cond_stage_model = self.first_stage_model
-             elif config == "__is_unconditional__":
-                 print(f"Training {self.__class__.__name__} as an unconditional model.")
-                 self.cond_stage_model = None
-                 # self.be_unconditional = True
-             else:
-                 model = instantiate_from_config(config)
-                 self.cond_stage_model = model.eval()
-                 self.cond_stage_model.train = disabled_train
-                 for param in self.cond_stage_model.parameters():
-                     param.requires_grad = False
-         else:
-             assert config != '__is_first_stage__'
-             assert config != '__is_unconditional__'
-             model = instantiate_from_config(config)
-             self.cond_stage_model = model
-
-     def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
-         denoise_row = []
-         for zd in tqdm(samples, desc=desc):
-             denoise_row.append(self.decode_first_stage(zd.to(self.device),
-                                                        force_not_quantize=force_no_decoder_quantization))
-         n_imgs_per_row = len(denoise_row)
-         denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
-         denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
-         denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
-         denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
-         return denoise_grid
-
-     def get_first_stage_encoding(self, encoder_posterior):
-         if isinstance(encoder_posterior, DiagonalGaussianDistribution):
-             z = encoder_posterior.sample()
-         elif isinstance(encoder_posterior, torch.Tensor):
-             z = encoder_posterior
-         else:
-             raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
-         return self.scale_factor * z
-
-     def get_learned_conditioning(self, c):
-         if self.cond_stage_forward is None:
-             if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
-                 c = self.cond_stage_model.encode(c)
-                 if isinstance(c, DiagonalGaussianDistribution):
-                     c = c.mode()
-             else:
-                 c = self.cond_stage_model(c)
-         else:
-             assert hasattr(self.cond_stage_model, self.cond_stage_forward)
-             c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
-         return c
-
-     @torch.no_grad()
-     def get_unconditional_conditioning(self, batch_size, null_label=None):
-         if null_label is not None:
-             xc = null_label
-             if isinstance(xc, ListConfig):
-                 xc = list(xc)
-             if isinstance(xc, dict) or isinstance(xc, list):
-                 c = self.get_learned_conditioning(xc)
-             else:
-                 if hasattr(xc, "to"):
-                     xc = xc.to(self.device)
-                 c = self.get_learned_conditioning(xc)
-         else:
-             if self.cond_stage_key in ["class_label", "cls"]:
-                 xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
-                 return self.get_learned_conditioning(xc)
-             else:
-                 raise NotImplementedError("todo")
-         if isinstance(c, list):  # in case the encoder gives us a list
-             for i in range(len(c)):
-                 c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
-         else:
-             c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
-         return c
-
-     def meshgrid(self, h, w):
-         y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
-         x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
-         arr = torch.cat([y, x], dim=-1)
-         return arr
-
-     def delta_border(self, h, w):
-         """
-         :param h: height
-         :param w: width
-         :return: normalized distance to image border,
-          with min distance = 0 at border and max dist = 0.5 at image center
-         """
-         lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
-         arr = self.meshgrid(h, w) / lower_right_corner
-         dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
-         dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
-         edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
-         return edge_dist
-
-     def get_weighting(self, h, w, Ly, Lx, device):
-         weighting = self.delta_border(h, w)
-         weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
-                                self.split_input_params["clip_max_weight"])
-         weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
-         if self.split_input_params["tie_braker"]:
-             L_weighting = self.delta_border(Ly, Lx)
-             L_weighting = torch.clip(L_weighting,
-                                      self.split_input_params["clip_min_tie_weight"],
-                                      self.split_input_params["clip_max_tie_weight"])
-
-             L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
-             weighting = weighting * L_weighting
-         return weighting
-
-     def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
-         """
-         :param x: img of size (bs, c, h, w)
-         :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
-         """
-         bs, nc, h, w = x.shape
-
-         # number of crops in image
-         Ly = (h - kernel_size[0]) // stride[0] + 1
-         Lx = (w - kernel_size[1]) // stride[1] + 1
-
-         if uf == 1 and df == 1:
-             fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
-             unfold = torch.nn.Unfold(**fold_params)
-
-             fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
-
-             weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
-             normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
-             weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
-
-         elif uf > 1 and df == 1:
-             fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
-             unfold = torch.nn.Unfold(**fold_params)
-
-             fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
-                                 dilation=1, padding=0,
-                                 stride=(stride[0] * uf, stride[1] * uf))
-             fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
-
-             weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
-             normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
-             weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
-
-         elif df > 1 and uf == 1:
-             fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
-             unfold = torch.nn.Unfold(**fold_params)
-
-             fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
-                                 dilation=1, padding=0,
-                                 stride=(stride[0] // df, stride[1] // df))
-             fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
-
-             weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
-             normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
-             weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
-
-         else:
-             raise NotImplementedError
-
-         return fold, unfold, normalization, weighting
-
-     @torch.no_grad()
-     def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
-                   cond_key=None, return_original_cond=False, bs=None):
-         x = super().get_input(batch, k)
-         if bs is not None:
-             x = x[:bs]
-         x = x.to(self.device)
-         encoder_posterior = self.encode_first_stage(x)
-         z = self.get_first_stage_encoding(encoder_posterior).detach()
-
-         if self.model.conditioning_key is not None:
-             if cond_key is None:
-                 cond_key = self.cond_stage_key
-             if cond_key != self.first_stage_key:
-                 if cond_key in ['caption', 'coordinates_bbox']:
-                     xc = batch[cond_key]
-                 elif cond_key == 'class_label':
-                     xc = batch
-                 else:
-                     xc = super().get_input(batch, cond_key).to(self.device)
-             else:
-                 xc = x
-             if not self.cond_stage_trainable or force_c_encode:
-                 if isinstance(xc, dict) or isinstance(xc, list):
-                     # import pudb; pudb.set_trace()
-                     c = self.get_learned_conditioning(xc)
-                 else:
-                     c = self.get_learned_conditioning(xc.to(self.device))
-             else:
-                 c = xc
-             if bs is not None:
-                 c = c[:bs]
-             # Testing #
-             if cond_key == 'masked_image':
-                 mask = super().get_input(batch, "mask")
-                 cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:])  # [B, 1, 10, 106]
-                 c = torch.cat((c, cc), dim=1)  # [B, 5, 10, 106]
-             # Testing #
-             if self.use_positional_encodings:
-                 pos_x, pos_y = self.compute_latent_shifts(batch)
-                 ckey = __conditioning_keys__[self.model.conditioning_key]
-                 c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
-         else:
-             c = None
-             xc = None
-             if self.use_positional_encodings:
-                 pos_x, pos_y = self.compute_latent_shifts(batch)
-                 c = {'pos_x': pos_x, 'pos_y': pos_y}
-         out = [z, c]
-         if return_first_stage_outputs:
-             xrec = self.decode_first_stage(z)
-             out.extend([x, xrec])
-         if return_original_cond:
-             out.append(xc)
-         return out
-
-     @torch.no_grad()
-     def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
-         if predict_cids:
-             if z.dim() == 4:
-                 z = torch.argmax(z.exp(), dim=1).long()
-             z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
-             z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
-         z = 1. / self.scale_factor * z
-
-         if hasattr(self, "split_input_params"):
-             if self.split_input_params["patch_distributed_vq"]:
-                 ks = self.split_input_params["ks"]  # eg. (128, 128)
-                 stride = self.split_input_params["stride"]  # eg. (64, 64)
-                 uf = self.split_input_params["vqf"]
-                 bs, nc, h, w = z.shape
-                 if ks[0] > h or ks[1] > w:
-                     ks = (min(ks[0], h), min(ks[1], w))
-                     print("reducing Kernel")
-
-                 if stride[0] > h or stride[1] > w:
-                     stride = (min(stride[0], h), min(stride[1], w))
-                     print("reducing stride")
-
-                 fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
-
-                 z = unfold(z)  # (bn, nc * prod(**ks), L)
-                 # 1. Reshape to img shape
-                 z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)
-
-                 # 2. apply model loop over last dim
-                 if isinstance(self.first_stage_model, VQModelInterface):
-                     output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
-                                                                  force_not_quantize=predict_cids or force_not_quantize)
-                                    for i in range(z.shape[-1])]
-                 else:
-                     output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
-                                    for i in range(z.shape[-1])]
-
-                 o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
-                 o = o * weighting
-                 # Reverse 1. reshape to img shape
-                 o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
-                 # stitch crops together
-                 decoded = fold(o)
-                 decoded = decoded / normalization  # norm is shape (1, 1, h, w)
-                 return decoded
-             else:
-                 if isinstance(self.first_stage_model, VQModelInterface):
-                     return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
-                 else:
-                     return self.first_stage_model.decode(z)
-
-         else:
-             if isinstance(self.first_stage_model, VQModelInterface):
-                 return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
-             else:
-                 return self.first_stage_model.decode(z)
-
-     # same as above but without decorator
-     def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
-         if predict_cids:
-             if z.dim() == 4:
-                 z = torch.argmax(z.exp(), dim=1).long()
-             z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
-             z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
-         z = 1. / self.scale_factor * z
-
-         if hasattr(self, "split_input_params"):
-             if self.split_input_params["patch_distributed_vq"]:
-                 ks = self.split_input_params["ks"]  # eg. (128, 128)
-                 stride = self.split_input_params["stride"]  # eg. (64, 64)
-                 uf = self.split_input_params["vqf"]
-                 bs, nc, h, w = z.shape
-                 if ks[0] > h or ks[1] > w:
-                     ks = (min(ks[0], h), min(ks[1], w))
-                     print("reducing Kernel")
-
-                 if stride[0] > h or stride[1] > w:
-                     stride = (min(stride[0], h), min(stride[1], w))
-                     print("reducing stride")
-
-                 fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
-
-                 z = unfold(z)  # (bn, nc * prod(**ks), L)
-                 # 1. Reshape to img shape
-                 z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)
-
-                 # 2. apply model loop over last dim
-                 if isinstance(self.first_stage_model, VQModelInterface):
-                     output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
-                                                                  force_not_quantize=predict_cids or force_not_quantize)
-                                    for i in range(z.shape[-1])]
-                 else:
-                     output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
-                                    for i in range(z.shape[-1])]
-
-                 o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
-                 o = o * weighting
-                 # Reverse 1. reshape to img shape
-                 o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
-                 # stitch crops together
-                 decoded = fold(o)
-                 decoded = decoded / normalization  # norm is shape (1, 1, h, w)
-                 return decoded
-             else:
-                 if isinstance(self.first_stage_model, VQModelInterface):
-                     return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
-                 else:
-                     return self.first_stage_model.decode(z)
-
-         else:
-             if isinstance(self.first_stage_model, VQModelInterface):
-                 return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
-             else:
-                 return self.first_stage_model.decode(z)
-
-     @torch.no_grad()
-     def encode_first_stage(self, x):
-         if hasattr(self, "split_input_params"):
-             if self.split_input_params["patch_distributed_vq"]:
-                 ks = self.split_input_params["ks"]  # eg. (128, 128)
-                 stride = self.split_input_params["stride"]  # eg. (64, 64)
-                 df = self.split_input_params["vqf"]
-                 self.split_input_params['original_image_size'] = x.shape[-2:]
-                 bs, nc, h, w = x.shape
-                 if ks[0] > h or ks[1] > w:
-                     ks = (min(ks[0], h), min(ks[1], w))
-                     print("reducing Kernel")
-
-                 if stride[0] > h or stride[1] > w:
-                     stride = (min(stride[0], h), min(stride[1], w))
-                     print("reducing stride")
-
-                 fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
-                 z = unfold(x)  # (bn, nc * prod(**ks), L)
-                 # Reshape to img shape
-                 z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L)
-
-                 output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
-                                for i in range(z.shape[-1])]
-
-                 o = torch.stack(output_list, axis=-1)
-                 o = o * weighting
-
-                 # Reverse reshape to img shape
-                 o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
-                 # stitch crops together
-                 decoded = fold(o)
-                 decoded = decoded / normalization
-                 return decoded
-
-             else:
-                 return self.first_stage_model.encode(x)
-         else:
-             return self.first_stage_model.encode(x)
-
-     def shared_step(self, batch, **kwargs):
-         x, c = self.get_input(batch, self.first_stage_key)
-         loss = self(x, c)
-         return loss
-
-     def test_step(self, batch, batch_idx):
-         cond = batch[self.cond_stage_key] * self.test_repeat
-         cond = self.get_learned_conditioning(cond)  # c: string -> [B, T, Context_dim]
-         batch_size = len(cond)
-         enc_emb = self.sample(cond, batch_size, timesteps=self.test_numsteps)  # shape = [batch_size, self.channels, self.mel_dim, self.mel_length]
-         xrec = self.decode_first_stage(enc_emb)
-         reconstructions = (xrec + 1) / 2  # to mel scale
-         test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
-         savedir = os.path.join(self.trainer.log_dir, f'output_imgs_{test_ckpt_path}', 'fake_class')
-         if not os.path.exists(savedir):
-             os.makedirs(savedir)
-
-         file_names = batch['f_name']
-         nfiles = len(file_names)
-         reconstructions = reconstructions.cpu().numpy().squeeze(1)  # squeeze channel dim
531
- for k in range(reconstructions.shape[0]):
532
- b,repeat = k % nfiles, k // nfiles
533
- vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
534
- v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
535
- save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}_{repeat}.npy')# the num_th caption, the repeat_th repitition
536
- np.save(save_img_path,reconstructions[b])
537
-
538
- return None
539
-
540
- def forward(self, x, c, *args, **kwargs):
541
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
542
- if self.model.conditioning_key is not None:
543
- assert c is not None
544
- if self.cond_stage_trainable:
545
- c = self.get_learned_conditioning(c) # c: string -> [B, T, Context_dim]
546
- if self.shorten_cond_schedule: # TODO: drop this option
547
- tc = self.cond_ids[t].to(self.device)
548
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
549
- return self.p_losses(x, c, t, *args, **kwargs)
550
-
551
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
552
- def rescale_bbox(bbox):
553
- x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
554
- y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
555
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
556
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
557
- return x0, y0, w, h
558
-
559
- return [rescale_bbox(b) for b in bboxes]
560
-
561
- def apply_model(self, x_noisy, t, cond, return_ids=False):
562
-
563
- if isinstance(cond, dict):
564
- # hybrid case, cond is exptected to be a dict
565
- pass
566
- else:
567
- if not isinstance(cond, list):
568
- cond = [cond]
569
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
570
- cond = {key: cond}
571
-
572
- if hasattr(self, "split_input_params"):
573
- assert len(cond) == 1 # todo can only deal with one conditioning atm
574
- assert not return_ids
575
- ks = self.split_input_params["ks"] # eg. (128, 128)
576
- stride = self.split_input_params["stride"] # eg. (64, 64)
577
-
578
- h, w = x_noisy.shape[-2:]
579
-
580
- fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
581
-
582
- z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
583
- # Reshape to img shape
584
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
585
- z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
586
-
587
- if self.cond_stage_key in ["image", "LR_image", "segmentation",
588
- 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
589
- c_key = next(iter(cond.keys())) # get key
590
- c = next(iter(cond.values())) # get value
591
- assert (len(c) == 1) # todo extend to list with more than one elem
592
- c = c[0] # get element
593
-
594
- c = unfold(c)
595
- c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
596
-
597
- cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
598
-
599
- elif self.cond_stage_key == 'coordinates_bbox':
600
- assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
601
-
602
- # assuming padding of unfold is always 0 and its dilation is always 1
603
- n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
604
- full_img_h, full_img_w = self.split_input_params['original_image_size']
605
- # as we are operating on latents, we need the factor from the original image size to the
606
- # spatial latent size to properly rescale the crops for regenerating the bbox annotations
607
- num_downs = self.first_stage_model.encoder.num_resolutions - 1
608
- rescale_latent = 2 ** (num_downs)
609
-
610
- # get top left postions of patches as conforming for the bbbox tokenizer, therefore we
611
- # need to rescale the tl patch coordinates to be in between (0,1)
612
- tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
613
- rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
614
- for patch_nr in range(z.shape[-1])]
615
-
616
- # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
617
- patch_limits = [(x_tl, y_tl,
618
- rescale_latent * ks[0] / full_img_w,
619
- rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
620
- # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
621
-
622
- # tokenize crop coordinates for the bounding boxes of the respective patches
623
- patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
624
- for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
625
- print(patch_limits_tknzd[0].shape)
626
- # cut tknzd crop position from conditioning
627
- assert isinstance(cond, dict), 'cond must be dict to be fed into model'
628
- cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
629
- print(cut_cond.shape)
630
-
631
- adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
632
- adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
633
- print(adapted_cond.shape)
634
- adapted_cond = self.get_learned_conditioning(adapted_cond)
635
- print(adapted_cond.shape)
636
- adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
637
- print(adapted_cond.shape)
638
-
639
- cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
640
-
641
- else:
642
- cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
643
-
644
- # apply model by loop over crops
645
- output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
646
- assert not isinstance(output_list[0],
647
- tuple) # todo cant deal with multiple model outputs check this never happens
648
-
649
- o = torch.stack(output_list, axis=-1)
650
- o = o * weighting
651
- # Reverse reshape to img shape
652
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
653
- # stitch crops together
654
- x_recon = fold(o) / normalization
655
-
656
- else:
657
- x_recon = self.model(x_noisy, t, **cond)
658
-
659
- if isinstance(x_recon, tuple) and not return_ids:
660
- return x_recon[0]
661
- else:
662
- return x_recon
663
-
664
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
665
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
666
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
667
-
668
- def _prior_bpd(self, x_start):
669
- """
670
- Get the prior KL term for the variational lower-bound, measured in
671
- bits-per-dim.
672
- This term can't be optimized, as it only depends on the encoder.
673
- :param x_start: the [N x C x ...] tensor of inputs.
674
- :return: a batch of [N] KL values (in bits), one per batch element.
675
- """
676
- batch_size = x_start.shape[0]
677
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
678
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
679
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
680
- return mean_flat(kl_prior) / np.log(2.0)
681
-
682
- def p_losses(self, x_start, cond, t, noise=None):
683
- noise = default(noise, lambda: torch.randn_like(x_start))
684
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
685
- model_output = self.apply_model(x_noisy, t, cond)
686
-
687
- loss_dict = {}
688
- prefix = 'train' if self.training else 'val'
689
-
690
- if self.parameterization == "x0":
691
- target = x_start
692
- elif self.parameterization == "eps":
693
- target = noise
694
- else:
695
- raise NotImplementedError()
696
-
697
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
698
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
699
-
700
- logvar_t = self.logvar[t].to(self.device)
701
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
702
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
703
- if self.learn_logvar:
704
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
705
- loss_dict.update({'logvar': self.logvar.data.mean()})
706
-
707
- loss = self.l_simple_weight * loss.mean()
708
-
709
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
710
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
711
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
712
- loss += (self.original_elbo_weight * loss_vlb)
713
- loss_dict.update({f'{prefix}/loss': loss})
714
-
715
- return loss, loss_dict
716
-
717
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
718
- return_x0=False, score_corrector=None, corrector_kwargs=None):
719
- t_in = t
720
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
721
-
722
- if score_corrector is not None:
723
- assert self.parameterization == "eps"
724
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
725
-
726
- if return_codebook_ids:
727
- model_out, logits = model_out
728
-
729
- if self.parameterization == "eps":
730
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
731
- elif self.parameterization == "x0":
732
- x_recon = model_out
733
- else:
734
- raise NotImplementedError()
735
-
736
- if clip_denoised:
737
- x_recon.clamp_(-1., 1.)
738
- if quantize_denoised:
739
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
740
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
741
- if return_codebook_ids:
742
- return model_mean, posterior_variance, posterior_log_variance, logits
743
- elif return_x0:
744
- return model_mean, posterior_variance, posterior_log_variance, x_recon
745
- else:
746
- return model_mean, posterior_variance, posterior_log_variance
747
-
748
- @torch.no_grad()
749
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
750
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
751
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
752
- b, *_, device = *x.shape, x.device
753
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
754
- return_codebook_ids=return_codebook_ids,
755
- quantize_denoised=quantize_denoised,
756
- return_x0=return_x0,
757
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
758
- if return_codebook_ids:
759
- raise DeprecationWarning("Support dropped.")
760
- model_mean, _, model_log_variance, logits = outputs
761
- elif return_x0:
762
- model_mean, _, model_log_variance, x0 = outputs
763
- else:
764
- model_mean, _, model_log_variance = outputs
765
-
766
- noise = noise_like(x.shape, device, repeat_noise) * temperature
767
- if noise_dropout > 0.:
768
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
769
- # no noise when t == 0
770
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
771
-
772
- if return_codebook_ids:
773
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
774
- if return_x0:
775
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
776
- else:
777
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
778
-
779
- @torch.no_grad()
780
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
781
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
782
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
783
- log_every_t=None):
784
- if not log_every_t:
785
- log_every_t = self.log_every_t
786
- timesteps = self.num_timesteps
787
- if batch_size is not None:
788
- b = batch_size if batch_size is not None else shape[0]
789
- shape = [batch_size] + list(shape)
790
- else:
791
- b = batch_size = shape[0]
792
- if x_T is None:
793
- img = torch.randn(shape, device=self.device)
794
- else:
795
- img = x_T
796
- intermediates = []
797
- if cond is not None:
798
- if isinstance(cond, dict):
799
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
800
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
801
- else:
802
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
803
-
804
- if start_T is not None:
805
- timesteps = min(timesteps, start_T)
806
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
807
- total=timesteps) if verbose else reversed(
808
- range(0, timesteps))
809
- if type(temperature) == float:
810
- temperature = [temperature] * timesteps
811
-
812
- for i in iterator:
813
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
814
- if self.shorten_cond_schedule:
815
- assert self.model.conditioning_key != 'hybrid'
816
- tc = self.cond_ids[ts].to(cond.device)
817
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
818
-
819
- img, x0_partial = self.p_sample(img, cond, ts,
820
- clip_denoised=self.clip_denoised,
821
- quantize_denoised=quantize_denoised, return_x0=True,
822
- temperature=temperature[i], noise_dropout=noise_dropout,
823
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
824
- if mask is not None:
825
- assert x0 is not None
826
- img_orig = self.q_sample(x0, ts)
827
- img = img_orig * mask + (1. - mask) * img
828
-
829
- if i % log_every_t == 0 or i == timesteps - 1:
830
- intermediates.append(x0_partial)
831
- if callback: callback(i)
832
- if img_callback: img_callback(img, i)
833
- return img, intermediates
834
-
835
- @torch.no_grad()
836
- def p_sample_loop(self, cond, shape, return_intermediates=False,
837
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
838
- mask=None, x0=None, img_callback=None, start_T=None,
839
- log_every_t=None):
840
-
841
- if not log_every_t:
842
- log_every_t = self.log_every_t
843
- device = self.betas.device
844
- b = shape[0]
845
- if x_T is None:
846
- img = torch.randn(shape, device=device)
847
- else:
848
- img = x_T
849
-
850
- intermediates = [img]
851
- if timesteps is None:
852
- timesteps = self.num_timesteps
853
-
854
- if start_T is not None:
855
- timesteps = min(timesteps, start_T)
856
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
857
- range(0, timesteps))
858
-
859
- if mask is not None:
860
- assert x0 is not None
861
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
862
-
863
- for i in iterator:
864
- ts = torch.full((b,), i, device=device, dtype=torch.long)
865
- if self.shorten_cond_schedule:
866
- assert self.model.conditioning_key != 'hybrid'
867
- tc = self.cond_ids[ts].to(cond.device)
868
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
869
-
870
- img = self.p_sample(img, cond, ts,
871
- clip_denoised=self.clip_denoised,
872
- quantize_denoised=quantize_denoised)
873
- if mask is not None:
874
- img_orig = self.q_sample(x0, ts)
875
- img = img_orig * mask + (1. - mask) * img
876
-
877
- if i % log_every_t == 0 or i == timesteps - 1:
878
- intermediates.append(img)
879
- if callback: callback(i)
880
- if img_callback: img_callback(img, i)
881
-
882
- if return_intermediates:
883
- return img, intermediates
884
- return img
885
-
886
- @torch.no_grad()
887
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
888
- verbose=True, timesteps=None, quantize_denoised=False,
889
- mask=None, x0=None, shape=None,**kwargs):
890
- if shape is None:
891
- shape = (batch_size, self.channels, self.mel_dim, self.mel_length)
892
- if cond is not None:
893
- if isinstance(cond, dict):
894
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
895
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
896
- else:
897
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
898
- return self.p_sample_loop(cond,
899
- shape,
900
- return_intermediates=return_intermediates, x_T=x_T,
901
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
902
- mask=mask, x0=x0)
903
-
904
- @torch.no_grad()
905
- def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
906
-
907
- if ddim:
908
- ddim_sampler = DDIMSampler(self)
909
- shape = (self.channels, self.mel_dim, self.mel_length)
910
- samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
911
- shape,cond,verbose=False,**kwargs)
912
-
913
- else:
914
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
915
- return_intermediates=True,**kwargs)
916
-
917
- return samples, intermediates
918
-
919
-
920
- @torch.no_grad()
921
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
922
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
923
- plot_diffusion_rows=True, **kwargs):
924
-
925
- use_ddim = ddim_steps is not None
926
-
927
- log = dict()
928
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
929
- return_first_stage_outputs=True,
930
- force_c_encode=True,
931
- return_original_cond=True,
932
- bs=N)
933
- N = min(x.shape[0], N)
934
- n_row = min(x.shape[0], n_row)
935
- log["inputs"] = x
936
- log["reconstruction"] = xrec
937
- if self.model.conditioning_key is not None:
938
- if hasattr(self.cond_stage_model, "decode") and self.cond_stage_key != "masked_image":
939
- xc = self.cond_stage_model.decode(c)
940
- log["conditioning"] = xc
941
- elif self.cond_stage_key == "masked_image":
942
- log["mask"] = c[:, -1, :, :][:, None, :, :]
943
- xc = self.cond_stage_model.decode(c[:, :self.cond_stage_model.embed_dim, :, :])
944
- log["conditioning"] = xc
945
- elif self.cond_stage_key in ["caption"]:
946
- xc = log_txt_as_img((256, 256), batch["caption"])
947
- log["conditioning"] = xc
948
- elif self.cond_stage_key == 'class_label':
949
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
950
- log['conditioning'] = xc
951
- elif isimage(xc):
952
- log["conditioning"] = xc
953
- if ismap(xc):
954
- log["original_conditioning"] = self.to_rgb(xc)
955
-
956
- if plot_diffusion_rows:
957
- # get diffusion row
958
- diffusion_row = list()
959
- z_start = z[:n_row]
960
- for t in range(self.num_timesteps):
961
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
962
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
963
- t = t.to(self.device).long()
964
- noise = torch.randn_like(z_start)
965
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
966
- diffusion_row.append(self.decode_first_stage(z_noisy))
967
-
968
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
969
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
970
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
971
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
972
- log["diffusion_row"] = diffusion_grid
973
-
974
- if sample:
975
- # get denoise row
976
- with self.ema_scope("Plotting"):
977
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
978
- ddim_steps=ddim_steps,eta=ddim_eta)
979
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
980
- x_samples = self.decode_first_stage(samples)
981
- log["samples"] = x_samples
982
- if plot_denoise_rows:
983
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
984
- log["denoise_row"] = denoise_grid
985
-
986
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
987
- self.first_stage_model, IdentityFirstStage):
988
- # also display when quantizing x0 while sampling
989
- with self.ema_scope("Plotting Quantized Denoised"):
990
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
991
- ddim_steps=ddim_steps,eta=ddim_eta,
992
- quantize_denoised=True)
993
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
994
- # quantize_denoised=True)
995
- x_samples = self.decode_first_stage(samples.to(self.device))
996
- log["samples_x0_quantized"] = x_samples
997
-
998
- if inpaint:
999
- # make a simple center square
1000
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
1001
- mask = torch.ones(N, h, w).to(self.device)
1002
- # zeros will be filled in
1003
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1004
- mask = mask[:, None, ...]
1005
- with self.ema_scope("Plotting Inpaint"):
1006
-
1007
- samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
1008
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1009
- x_samples = self.decode_first_stage(samples.to(self.device))
1010
- log["samples_inpainting"] = x_samples
1011
- log["mask_inpainting"] = mask
1012
-
1013
- # outpaint
1014
- mask = 1 - mask
1015
- with self.ema_scope("Plotting Outpaint"):
1016
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
1017
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1018
- x_samples = self.decode_first_stage(samples.to(self.device))
1019
- log["samples_outpainting"] = x_samples
1020
- log["mask_outpainting"] = mask
1021
-
1022
- if plot_progressive_rows:
1023
- with self.ema_scope("Plotting Progressives"):
1024
- img, progressives = self.progressive_denoising(c,
1025
- shape=(self.channels, self.mel_dim, self.mel_length),
1026
- batch_size=N)
1027
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1028
- log["progressive_row"] = prog_row
1029
-
1030
- if return_keys:
1031
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1032
- return log
1033
- else:
1034
- return {key: log[key] for key in return_keys}
1035
- return log
1036
-
1037
- def configure_optimizers(self):
1038
- lr = self.learning_rate
1039
- params = list(self.model.parameters())
1040
- if self.cond_stage_trainable:
1041
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1042
- params = params + list(self.cond_stage_model.parameters())
1043
- if self.learn_logvar:
1044
- print('Diffusion model optimizing logvar')
1045
- params.append(self.logvar)
1046
- opt = torch.optim.AdamW(params, lr=lr)
1047
- if self.use_scheduler:
1048
- assert 'target' in self.scheduler_config
1049
- scheduler = instantiate_from_config(self.scheduler_config)
1050
-
1051
- print("Setting up LambdaLR scheduler...")
1052
- scheduler = [
1053
- {
1054
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1055
- 'interval': 'step',
1056
- 'frequency': 1
1057
- }]
1058
- return [opt], scheduler
1059
- return opt
1060
-
1061
- @torch.no_grad()
1062
- def to_rgb(self, x):
1063
- x = x.float()
1064
- if not hasattr(self, "colorize"):
1065
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1066
- x = nn.functional.conv2d(x, weight=self.colorize)
1067
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1068
- return x
1069
-
1070
-
1071
- class LatentFinetuneDiffusion(LatentDiffusion_audio):
1072
- """
1073
- Basis for different finetunas, such as inpainting or depth2image
1074
- To disable finetuning mode, set finetune_keys to None
1075
- """
1076
-
1077
- def __init__(self,
1078
- concat_keys: tuple,
1079
- finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
1080
- "model_ema.diffusion_modelinput_blocks00weight"
1081
- ),
1082
- keep_finetune_dims=4,
1083
- # if model was trained without concat mode before and we would like to keep these channels
1084
- c_concat_log_start=None, # to log reconstruction of c_concat codes
1085
- c_concat_log_end=None,
1086
- *args, **kwargs
1087
- ):
1088
- ckpt_path = kwargs.pop("ckpt_path", None)
1089
- ignore_keys = kwargs.pop("ignore_keys", list())
1090
- super().__init__(*args, **kwargs)
1091
- self.finetune_keys = finetune_keys
1092
- self.concat_keys = concat_keys
1093
- self.keep_dims = keep_finetune_dims
1094
- self.c_concat_log_start = c_concat_log_start
1095
- self.c_concat_log_end = c_concat_log_end
1096
-
1097
- if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
1098
- if exists(ckpt_path):
1099
- self.init_from_ckpt(ckpt_path, ignore_keys)
1100
-
1101
- def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
1102
- sd = torch.load(path, map_location="cpu")
1103
- if "state_dict" in list(sd.keys()):
1104
- sd = sd["state_dict"]
1105
- keys = list(sd.keys())
1106
-
1107
- for k in keys:
1108
- for ik in ignore_keys:
1109
- if k.startswith(ik):
1110
- print("Deleting key {} from state_dict.".format(k))
1111
- del sd[k]
1112
-
1113
- # make it explicit, finetune by including extra input channels
1114
- if exists(self.finetune_keys) and k in self.finetune_keys:
1115
- new_entry = None
1116
- for name, param in self.named_parameters():
1117
- if name in self.finetune_keys:
1118
- print(
1119
- f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
1120
- new_entry = torch.zeros_like(param) # zero init
1121
- assert exists(new_entry), 'did not find matching parameter to modify'
1122
- new_entry[:, :self.keep_dims, ...] = sd[k]
1123
- sd[k] = new_entry
1124
-
1125
- missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False)
1126
- print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
1127
- if len(missing) > 0:
1128
- print(f"Missing Keys: {missing}")
1129
- if len(unexpected) > 0:
1130
- print(f"Unexpected Keys: {unexpected}")
1131
-
1132
- @torch.no_grad()
1133
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1134
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1135
- plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
1136
- use_ema_scope=True,
1137
- **kwargs):
1138
- use_ddim = ddim_steps is not None
1139
-
1140
- log = dict()
1141
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
1142
- c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
1143
- N = min(x.shape[0], N)
1144
- n_row = min(x.shape[0], n_row)
1145
- log["inputs"] = x
1146
- log["reconstruction"] = xrec
1147
- if self.model.conditioning_key is not None:
1148
- if hasattr(self.cond_stage_model, "decode"):
1149
- xc = self.cond_stage_model.decode(c)
1150
- log["conditioning"] = xc
1151
- elif self.cond_stage_key in ["caption"]:
1152
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
1153
- log["conditioning"] = xc
1154
- elif self.cond_stage_key == 'class_label':
1155
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
1156
- log['conditioning'] = xc
1157
- elif isimage(xc):
1158
- log["conditioning"] = xc
1159
- if ismap(xc):
1160
- log["original_conditioning"] = self.to_rgb(xc)
1161
-
1162
- if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
1163
- log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
1164
-
1165
- if plot_diffusion_rows:
1166
- # get diffusion row
1167
- diffusion_row = list()
1168
- z_start = z[:n_row]
1169
- for t in range(self.num_timesteps):
1170
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1171
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1172
- t = t.to(self.device).long()
1173
- noise = torch.randn_like(z_start)
1174
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1175
- diffusion_row.append(self.decode_first_stage(z_noisy))
1176
-
1177
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1178
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1179
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1180
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1181
- log["diffusion_row"] = diffusion_grid
1182
-
1183
- if sample:
1184
- # get denoise row
1185
- with self.ema_scope("Sampling"):
1186
- samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1187
- batch_size=N, ddim=use_ddim,
1188
- ddim_steps=ddim_steps, eta=ddim_eta)
1189
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1190
- x_samples = self.decode_first_stage(samples)
1191
- log["samples"] = x_samples
1192
- if plot_denoise_rows:
1193
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1194
- log["denoise_row"] = denoise_grid
1195
-
1196
- if unconditional_guidance_scale > 1.0:
1197
- uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
1198
- uc_cat = c_cat
1199
- uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
1200
- with self.ema_scope("Sampling with classifier-free guidance"):
1201
- samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
1202
- batch_size=N, ddim=use_ddim,
1203
- ddim_steps=ddim_steps, eta=ddim_eta,
1204
- unconditional_guidance_scale=unconditional_guidance_scale,
1205
- unconditional_conditioning=uc_full,
1206
- )
1207
- x_samples_cfg = self.decode_first_stage(samples_cfg)
1208
- log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
1209
-
1210
- return log
1211
-
1212
-
1213
- class LatentInpaintDiffusion(LatentFinetuneDiffusion):
1214
- """
1215
- can either run as pure inpainting model (only concat mode) or with mixed conditionings,
1216
- e.g. mask as concat and text via cross-attn.
1217
- To disable finetuning mode, set finetune_keys to None
1218
- """
1219
-
1220
- def __init__(self,
1221
- concat_keys=("mask", "masked_image"),
1222
- masked_image_key="masked_image",
1223
- *args, **kwargs
1224
- ):
1225
- super().__init__(concat_keys, *args, **kwargs)
1226
- self.masked_image_key = masked_image_key
1227
- assert self.masked_image_key in concat_keys
1228
-
1229
- @torch.no_grad()
1230
- def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
1231
- # note: restricted to non-trainable encoders currently
1232
- assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
1233
- z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
1234
- force_c_encode=True, return_original_cond=True, bs=bs)
1235
-
1236
- assert exists(self.concat_keys)
1237
- c_cat = list()
1238
- for ck in self.concat_keys:
1239
- if len(batch[ck].shape) == 3:
1240
- batch[ck] = batch[ck][..., None]
1241
- cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1242
- if bs is not None:
1243
- cc = cc[:bs]
1244
- cc = cc.to(self.device)
1245
- bchw = z.shape
1246
- if ck != self.masked_image_key:
1247
- cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
1248
- else:
1249
- cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
1250
- c_cat.append(cc)
1251
- c_cat = torch.cat(c_cat, dim=1)
1252
- all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
1253
- if return_first_stage_outputs:
1254
- return z, all_conds, x, xrec, xc
1255
- return z, all_conds
1256
-
1257
- @torch.no_grad()
1258
- def log_images(self, *args, **kwargs):
1259
- log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
1260
- log["masked_image"] = rearrange(args[0]["masked_image"],
1261
- 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
1262
- return log
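
For orientation, a minimal sketch of how this sampling path would typically be driven at inference time. The `generate_mel` helper and the single-caption batch are illustrative assumptions, not part of the deleted file; only the method names (`get_learned_conditioning`, `sample_log`, `decode_first_stage`) and the `(x + 1) / 2` rescaling come from the code above.

import torch

@torch.no_grad()
def generate_mel(model, caption: str, ddim_steps: int = 200):
    # text -> conditioning embedding [B, T, context_dim]
    c = model.get_learned_conditioning([caption])
    # DDIM sampling in latent space, then decode with the first stage
    samples, _ = model.sample_log(cond=c, batch_size=1,
                                  ddim=True, ddim_steps=ddim_steps)
    mel = model.decode_first_stage(samples)
    return (mel + 1) / 2  # map [-1, 1] back to mel scale, as in test_step
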
spaces/AgentVerse/agentVerse/agentverse/logging.py DELETED
@@ -1,291 +0,0 @@
- """Logging module for Auto-GPT."""
- import logging
- import os
- import random
- import re
- import time
- import json
- import abc
- from logging import LogRecord
- from typing import Any, List
-
- from colorama import Fore, Style
- from agentverse.utils import Singleton
-
-
- # from autogpt.speech import say_text
- class JsonFileHandler(logging.FileHandler):
-     def __init__(self, filename, mode="a", encoding=None, delay=False):
-         super().__init__(filename, mode, encoding, delay)
-
-     def emit(self, record):
-         json_data = json.loads(self.format(record))
-         with open(self.baseFilename, "w", encoding="utf-8") as f:
-             json.dump(json_data, f, ensure_ascii=False, indent=4)
-
-
- class JsonFormatter(logging.Formatter):
-     def format(self, record):
-         return record.msg
-
-
- class Logger(metaclass=Singleton):
-     """
-     Logger that handles titles in different colors.
-     Outputs logs to the console, activity.log, and error.log.
-     The console handler simulates typing.
-     """
-
-     def __init__(self):
-         # create log directory if it doesn't exist
-         this_files_dir_path = os.path.dirname(__file__)
-         log_dir = os.path.join(this_files_dir_path, "../logs")
-         if not os.path.exists(log_dir):
-             os.makedirs(log_dir)
-
-         log_file = "activity.log"
-         error_file = "error.log"
-
-         console_formatter = AutoGptFormatter("%(title_color)s %(message)s")
-
-         # Create a handler for the console which simulates typing
-         self.typing_console_handler = TypingConsoleHandler()
-         self.typing_console_handler.setLevel(logging.INFO)
-         self.typing_console_handler.setFormatter(console_formatter)
-
-         # Create a handler for the console without typing simulation
-         self.console_handler = ConsoleHandler()
-         self.console_handler.setLevel(logging.DEBUG)
-         self.console_handler.setFormatter(console_formatter)
-
-         # Info handler writing to activity.log
-         self.file_handler = logging.FileHandler(
-             os.path.join(log_dir, log_file), "a", "utf-8"
-         )
-         self.file_handler.setLevel(logging.DEBUG)
-         info_formatter = AutoGptFormatter(
-             "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
-         )
-         self.file_handler.setFormatter(info_formatter)
-
-         # Error handler writing to error.log
-         error_handler = logging.FileHandler(
-             os.path.join(log_dir, error_file), "a", "utf-8"
-         )
-         error_handler.setLevel(logging.ERROR)
-         error_formatter = AutoGptFormatter(
-             "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
-             " %(message_no_color)s"
-         )
-         error_handler.setFormatter(error_formatter)
-
-         self.typing_logger = logging.getLogger("TYPER")
-         self.typing_logger.addHandler(self.typing_console_handler)
-         self.typing_logger.addHandler(self.file_handler)
-         self.typing_logger.addHandler(error_handler)
-         self.typing_logger.setLevel(logging.DEBUG)
-
-         self.logger = logging.getLogger("LOGGER")
-         self.logger.addHandler(self.console_handler)
-         self.logger.addHandler(self.file_handler)
-         self.logger.addHandler(error_handler)
-         self.logger.setLevel(logging.DEBUG)
-
-         self.json_logger = logging.getLogger("JSON_LOGGER")
-         self.json_logger.addHandler(self.file_handler)
-         self.json_logger.addHandler(error_handler)
-         self.json_logger.setLevel(logging.DEBUG)
-
-         self.speak_mode = False
-         self.chat_plugins = []
-
-     def typewriter_log(
-         self, title="", title_color="", content="", speak_text=False, level=logging.INFO
-     ):
-         # if speak_text and self.speak_mode:
-         #     say_text(f"{title}. {content}")
-
-         for plugin in self.chat_plugins:
-             plugin.report(f"{title}. {content}")
-
-         if content:
-             if isinstance(content, list):
-                 content = "\n".join(content)
-         else:
-             content = ""
-
-         self.typing_logger.log(
-             level, content, extra={"title": title, "color": title_color}
-         )
-
-     def debug(
-         self,
-         message,
-         title="",
-         title_color="",
-     ):
-         self._log(title, title_color, message, logging.DEBUG)
-
-     def info(
-         self,
-         message,
-         title="",
-         title_color="",
-     ):
-         self._log(title, title_color, message, logging.INFO)
-
-     def warn(
-         self,
-         message,
-         title="",
-         title_color="",
-     ):
-         self._log(title, title_color, message, logging.WARN)
-
-     def error(self, title, message=""):
-         self._log(title, Fore.RED, message, logging.ERROR)
-
-     def _log(
-         self,
-         title: str = "",
-         title_color: str = "",
-         message: str = "",
-         level=logging.INFO,
-     ):
-         if isinstance(message, list):
-             if len(message) > 0:
-                 message = "\n".join([str(m) for m in message])
-             else:
-                 message = ""
-         self.logger.log(
-             level, message, extra={"title": str(title), "color": str(title_color)}
-         )
-
-     def set_level(self, level):
-         self.logger.setLevel(level)
-         self.typing_logger.setLevel(level)
-
-     def double_check(self, additionalText=None):
-         if not additionalText:
-             additionalText = (
-                 "Please ensure you've set up and configured everything"
-                 " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
-                 "double check. You can also create a github issue or join the discord"
-                 " and ask there!"
-             )
-
-         self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)
-
-     def log_json(self, data: Any, file_name: str) -> None:
-         # Define log directory
-         this_files_dir_path = os.path.dirname(__file__)
-         log_dir = os.path.join(this_files_dir_path, "../logs")
-
-         # Create a handler for JSON files
-         json_file_path = os.path.join(log_dir, file_name)
-         json_data_handler = JsonFileHandler(json_file_path)
-         json_data_handler.setFormatter(JsonFormatter())
-
-         # Log the JSON data using the custom file handler
-         self.json_logger.addHandler(json_data_handler)
-         self.json_logger.debug(data)
-         self.json_logger.removeHandler(json_data_handler)
-
-     def log_prompt(self, prompt: List[dict]) -> None:
-         self.debug("", "-=-=-=-=-=-=-=-=Prompt Start-=-=-=-=-=-=-=-=", Fore.MAGENTA)
-         for p in prompt:
-             self.debug(
-                 p["content"]
-                 if "function_call" not in p
-                 else p["content"]
-                 + "\nFunction Call:\n"
-                 + json.dumps(p["function_call"]),
-                 title=f'==={p["role"]}===\n',
-                 title_color=Fore.MAGENTA,
-             )
-         self.debug("", "-=-=-=-=-=-=-=-=Prompt End-=-=-=-=-=-=-=-=", Fore.MAGENTA)
-
-     def get_log_directory(self):
-         this_files_dir_path = os.path.dirname(__file__)
-         log_dir = os.path.join(this_files_dir_path, "../logs")
-         return os.path.abspath(log_dir)
-
-
- """
- Output stream to console using simulated typing
- """
-
-
- class TypingConsoleHandler(logging.StreamHandler):
-     def emit(self, record):
-         min_typing_speed = 0.05
-         max_typing_speed = 0.01
-
-         msg = self.format(record)
-         try:
-             words = re.split(r"(\s+)", msg)
-             for i, word in enumerate(words):
-                 print(word, end="", flush=True)
-                 # if i < len(words) - 1:
-                 #     print(" ", end="", flush=True)
-                 typing_speed = random.uniform(min_typing_speed, max_typing_speed)
-                 time.sleep(typing_speed)
-                 # type faster after each word
-                 min_typing_speed = min_typing_speed * 0.95
-                 max_typing_speed = max_typing_speed * 0.95
-             print()
-         except Exception:
-             self.handleError(record)
-
-
- class ConsoleHandler(logging.StreamHandler):
-     def emit(self, record) -> None:
-         msg = self.format(record)
-         try:
-             print(msg)
-         except Exception:
-             self.handleError(record)
-
-
- class AutoGptFormatter(logging.Formatter):
-     """
-     Allows handling the custom placeholders 'title_color' and 'message_no_color'.
-     To use this formatter, make sure to pass 'color' and 'title' as log extras.
-     """
-
-     def format(self, record: LogRecord) -> str:
-         if hasattr(record, "color"):
-             record.title_color = (
-                 getattr(record, "color")
-                 + getattr(record, "title", "")
-                 + " "
-                 + Style.RESET_ALL
-             )
-         else:
-             record.title_color = getattr(record, "title", "")
-
-         # Set 'title' to an empty string if it doesn't exist
-         record.title = getattr(record, "title", "")
-
-         if hasattr(record, "msg"):
-             record.message_no_color = remove_color_codes(getattr(record, "msg"))
-         else:
-             record.message_no_color = ""
-         return super().format(record)
-
-
- def remove_color_codes(s: str) -> str:
-     ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
-     return ansi_escape.sub("", s)
-
-
- logger = Logger()
-
-
- def get_logger():
-     return logger
-
-
- def typewriter_log(content="", color="", level=logging.INFO):
-     for line in content.split("\n"):
-         logger.typewriter_log(line, title_color=color, level=level)
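
For context, the module's public surface would be used roughly as below. This is a sketch; the titles, colors, and messages are arbitrary examples, while the signatures (`info(message, title, title_color)`, `error(title, message)`, module-level `typewriter_log`) come from the file above.

from colorama import Fore
from agentverse.logging import get_logger, typewriter_log

logger = get_logger()  # singleton Logger instance
logger.info("run started", title="[SETUP]", title_color=Fore.CYAN)
logger.error("[ERROR]", "config file not found")  # error(title, message="")
typewriter_log("Thinking...", color=Fore.YELLOW)  # simulated typing on stdout
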
spaces/AhmedBadrDev/stomach/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Stomach
- emoji: 🌍
- colorFrom: yellow
- colorTo: blue
- sdk: gradio
- sdk_version: 3.27.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/GetCode.py DELETED
@@ -1,232 +0,0 @@
-
-
-
- import os
- import pickle
- import numpy as np
- from dnnlib import tflib
- import tensorflow as tf
-
- import argparse
-
- def LoadModel(dataset_name):
-     # Initialize TensorFlow.
-     tflib.init_tf()
-     model_path='./model/'
-     model_name=dataset_name+'.pkl'
-
-     tmp=os.path.join(model_path,model_name)
-     with open(tmp, 'rb') as f:
-         _, _, Gs = pickle.load(f)
-     return Gs
-
- def lerp(a,b,t):
-     return a + (b - a) * t
-
- #stylegan-ada
- def SelectName(layer_name,suffix):
-     if suffix==None:
-         tmp1='add:0' in layer_name
-         tmp2='shape=(?,' in layer_name
-         tmp4='G_synthesis_1' in layer_name
-         tmp= tmp1 and tmp2 and tmp4
-     else:
-         tmp1=('/Conv0_up'+suffix) in layer_name
-         tmp2=('/Conv1'+suffix) in layer_name
-         tmp3=('4x4/Conv'+suffix) in layer_name
-         tmp4='G_synthesis_1' in layer_name
-         tmp5=('/ToRGB'+suffix) in layer_name
-         tmp= (tmp1 or tmp2 or tmp3 or tmp5) and tmp4
-     return tmp
-
-
- def GetSNames(suffix):
-     # get style tensor names
-     with tf.Session() as sess:
-         op = sess.graph.get_operations()
-         layers=[m.values() for m in op]
-
-
-         select_layers=[]
-         for layer in layers:
-             layer_name=str(layer)
-             if SelectName(layer_name,suffix):
-                 select_layers.append(layer[0])
-     return select_layers
-
- def SelectName2(layer_name):
-     tmp1='mod_bias' in layer_name
-     tmp2='mod_weight' in layer_name
-     tmp3='ToRGB' in layer_name
-
-     tmp= (tmp1 or tmp2) and (not tmp3)
-     return tmp
-
- def GetKName(Gs):
-
-     layers=[var for name, var in Gs.components.synthesis.vars.items()]
-
-     select_layers=[]
-     for layer in layers:
-         layer_name=str(layer)
-         if SelectName2(layer_name):
-             select_layers.append(layer)
-     return select_layers
-
- def GetCode(Gs,random_state,num_img,num_once,dataset_name):
-     rnd = np.random.RandomState(random_state)  #5
-
-     truncation_psi=0.7
-     truncation_cutoff=8
-
-     dlatent_avg=Gs.get_var('dlatent_avg')
-
-     dlatents=np.zeros((num_img,512),dtype='float32')
-     for i in range(int(num_img/num_once)):
-         src_latents = rnd.randn(num_once, Gs.input_shape[1])
-         src_dlatents = Gs.components.mapping.run(src_latents, None)  # [seed, layer, component]
-
-         # Apply truncation trick.
-         if truncation_psi is not None and truncation_cutoff is not None:
-             layer_idx = np.arange(src_dlatents.shape[1])[np.newaxis, :, np.newaxis]
-             ones = np.ones(layer_idx.shape, dtype=np.float32)
-             coefs = np.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones)
-             src_dlatents_np=lerp(dlatent_avg, src_dlatents, coefs)
-             src_dlatents=src_dlatents_np[:,0,:].astype('float32')
-         dlatents[(i*num_once):((i+1)*num_once),:]=src_dlatents
-     print('get all z and w')
-
-     tmp='./npy/'+dataset_name+'/W'
-     np.save(tmp,dlatents)
-
-
- def GetImg(Gs,num_img,num_once,dataset_name,save_name='images'):
-     print('Generate Image')
-     tmp='./npy/'+dataset_name+'/W.npy'
-     dlatents=np.load(tmp)
-     fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
-
-     all_images=[]
-     for i in range(int(num_img/num_once)):
-         print(i)
-         images=[]
-         for k in range(num_once):
-             tmp=dlatents[i*num_once+k]
-             tmp=tmp[None,None,:]
-             tmp=np.tile(tmp,(1,Gs.components.synthesis.input_shape[1],1))
-             image2= Gs.components.synthesis.run(tmp, randomize_noise=False, output_transform=fmt)
-             images.append(image2)
-
-         images=np.concatenate(images)
-
-         all_images.append(images)
-
-     all_images=np.concatenate(all_images)
-
-     tmp='./npy/'+dataset_name+'/'+save_name
-     np.save(tmp,all_images)
-
- def GetS(dataset_name,num_img):
-     print('Generate S')
-     tmp='./npy/'+dataset_name+'/W.npy'
-     dlatents=np.load(tmp)[:num_img]
-
-     with tf.Session() as sess:
-         init = tf.global_variables_initializer()
-         sess.run(init)
-
-         Gs=LoadModel(dataset_name)
-         Gs.print_layers()  #for ada
-         select_layers1=GetSNames(suffix=None)  #None,'/mul_1:0','/mod_weight/read:0','/MatMul:0'
-         dlatents=dlatents[:,None,:]
-         dlatents=np.tile(dlatents,(1,Gs.components.synthesis.input_shape[1],1))
-
-         all_s = sess.run(
-             select_layers1,
-             feed_dict={'G_synthesis_1/dlatents_in:0': dlatents})
-
-     layer_names=[layer.name for layer in select_layers1]
-     save_tmp=[layer_names,all_s]
-     return save_tmp
-
-
-
-
- def convert_images_to_uint8(images, drange=[-1,1], nchw_to_nhwc=False):
-     """Convert a minibatch of images from float32 to uint8 with configurable dynamic range.
-     Can be used as an output transformation for Network.run().
-     """
-     if nchw_to_nhwc:
-         images = np.transpose(images, [0, 2, 3, 1])
-
-     scale = 255 / (drange[1] - drange[0])
-     images = images * scale + (0.5 - drange[0] * scale)
-
-     np.clip(images, 0, 255, out=images)
-     images=images.astype('uint8')
-     return images
-
-
- def GetCodeMS(dlatents):
-     m=[]
-     std=[]
-     for i in range(len(dlatents)):
-         tmp= dlatents[i]
-         tmp_mean=tmp.mean(axis=0)
-         tmp_std=tmp.std(axis=0)
-         m.append(tmp_mean)
-         std.append(tmp_std)
-     return m,std
-
-
-
- #%%
- if __name__ == "__main__":
-
-
-     parser = argparse.ArgumentParser(description='Process some integers.')
-
-     parser.add_argument('--dataset_name',type=str,default='ffhq',
-                         help='name of dataset, for example, ffhq')
-     parser.add_argument('--code_type',choices=['w','s','s_mean_std'],default='w')
-
-     args = parser.parse_args()
-     random_state=5
-     num_img=100_000
-     num_once=1_000
-     dataset_name=args.dataset_name
-
-     if not os.path.isfile('./model/'+dataset_name+'.pkl'):
-         url='https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/'
-         name='stylegan2-'+dataset_name+'-config-f.pkl'
-         os.system('wget ' +url+name + ' -P ./model/')
-         os.system('mv ./model/'+name+' ./model/'+dataset_name+'.pkl')
-
-     if not os.path.isdir('./npy/'+dataset_name):
-         os.system('mkdir ./npy/'+dataset_name)
-
-     if args.code_type=='w':
-         Gs=LoadModel(dataset_name=dataset_name)
-         GetCode(Gs,random_state,num_img,num_once,dataset_name)
-         # GetImg(Gs,num_img=num_img,num_once=num_once,dataset_name=dataset_name,save_name='images_100K')  #no need
-     elif args.code_type=='s':
-         save_name='S'
-         save_tmp=GetS(dataset_name,num_img=2_000)
-         tmp='./npy/'+dataset_name+'/'+save_name
-         with open(tmp, "wb") as fp:
-             pickle.dump(save_tmp, fp)
-
-     elif args.code_type=='s_mean_std':
-         save_tmp=GetS(dataset_name,num_img=num_img)
-         dlatents=save_tmp[1]
-         m,std=GetCodeMS(dlatents)
-         save_tmp=[m,std]
-         save_name='S_mean_std'
-         tmp='./npy/'+dataset_name+'/'+save_name
-         with open(tmp, "wb") as fp:
-             pickle.dump(save_tmp, fp)
-
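
For reference, the argparse block above implies usage along these lines. A sketch only; the values mirror the script's own defaults, and the `ffhq` weights are downloaded automatically if absent.

# Equivalent to: python GetCode.py --dataset_name ffhq --code_type w
# Samples 100k z vectors, maps them to W with the truncation trick,
# and writes ./npy/ffhq/W.npy
Gs = LoadModel(dataset_name='ffhq')
GetCode(Gs, random_state=5, num_img=100_000, num_once=1_000, dataset_name='ffhq')
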
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/safety_checker_flax.py DELETED
@@ -1,112 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import Optional, Tuple
-
- import jax
- import jax.numpy as jnp
- from flax import linen as nn
- from flax.core.frozen_dict import FrozenDict
- from transformers import CLIPConfig, FlaxPreTrainedModel
- from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
-
-
- def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
-     norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
-     norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
-     return jnp.matmul(norm_emb_1, norm_emb_2.T)
-
-
- class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
-     config: CLIPConfig
-     dtype: jnp.dtype = jnp.float32
-
-     def setup(self):
-         self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
-         self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
-
-         self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
-         self.special_care_embeds = self.param(
-             "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
-         )
-
-         self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
-         self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
-
-     def __call__(self, clip_input):
-         pooled_output = self.vision_model(clip_input)[1]
-         image_embeds = self.visual_projection(pooled_output)
-
-         special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
-         cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
-
-         # increase this value to create a stronger `nsfw` filter
-         # at the cost of increasing the possibility of filtering benign image inputs
-         adjustment = 0.0
-
-         special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
-         special_scores = jnp.round(special_scores, 3)
-         is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
-         # Use a lower threshold if an image has any special care concept
-         special_adjustment = is_special_care * 0.01
-
-         concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
-         concept_scores = jnp.round(concept_scores, 3)
-         has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
-
-         return has_nsfw_concepts
-
-
- class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
-     config_class = CLIPConfig
-     main_input_name = "clip_input"
-     module_class = FlaxStableDiffusionSafetyCheckerModule
-
-     def __init__(
-         self,
-         config: CLIPConfig,
-         input_shape: Optional[Tuple] = None,
-         seed: int = 0,
-         dtype: jnp.dtype = jnp.float32,
-         _do_init: bool = True,
-         **kwargs,
-     ):
-         if input_shape is None:
-             input_shape = (1, 224, 224, 3)
-         module = self.module_class(config=config, dtype=dtype, **kwargs)
-         super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
-
-     def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
-         # init input tensor
-         clip_input = jax.random.normal(rng, input_shape)
-
-         params_rng, dropout_rng = jax.random.split(rng)
-         rngs = {"params": params_rng, "dropout": dropout_rng}
-
-         random_params = self.module.init(rngs, clip_input)["params"]
-
-         return random_params
-
-     def __call__(
-         self,
-         clip_input,
-         params: dict = None,
-     ):
-         clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
-
-         return self.module.apply(
-             {"params": params or self.params},
-             jnp.array(clip_input, dtype=jnp.float32),
-             rngs={},
-         )
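
Reviewer note: the module's decision rule is "cosine similarity minus a per-concept threshold;
flag the image if any score is positive". A minimal NumPy sketch of that rule under assumed
shapes (the embedding size and threshold values here are illustrative, not from the checkpoint):

    import numpy as np

    def cosine_similarity(a, b, eps=1e-12):
        a = a / np.clip(np.linalg.norm(a, axis=1, keepdims=True), eps, None)
        b = b / np.clip(np.linalg.norm(b, axis=1, keepdims=True), eps, None)
        return a @ b.T

    image_embeds = np.random.randn(4, 768)     # hypothetical projected CLIP embeddings
    concept_embeds = np.random.randn(17, 768)  # hypothetical concept bank
    thresholds = np.full(17, 0.3)              # hypothetical per-concept thresholds

    scores = cosine_similarity(image_embeds, concept_embeds) - thresholds[None, :]
    has_nsfw = np.any(scores > 0, axis=1)      # flag if any concept clears its threshold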
 
 
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/fast_rcnn_r50_fpn.py DELETED
@@ -1,62 +0,0 @@
- # model settings
- model = dict(
-     type='FastRCNN',
-     pretrained='torchvision://resnet50',
-     backbone=dict(
-         type='ResNet',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         frozen_stages=1,
-         norm_cfg=dict(type='BN', requires_grad=True),
-         norm_eval=True,
-         style='pytorch'),
-     neck=dict(
-         type='FPN',
-         in_channels=[256, 512, 1024, 2048],
-         out_channels=256,
-         num_outs=5),
-     roi_head=dict(
-         type='StandardRoIHead',
-         bbox_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         bbox_head=dict(
-             type='Shared2FCBBoxHead',
-             in_channels=256,
-             fc_out_channels=1024,
-             roi_feat_size=7,
-             num_classes=80,
-             bbox_coder=dict(
-                 type='DeltaXYWHBBoxCoder',
-                 target_means=[0., 0., 0., 0.],
-                 target_stds=[0.1, 0.1, 0.2, 0.2]),
-             reg_class_agnostic=False,
-             loss_cls=dict(
-                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-             loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg=dict(
-         rcnn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.5,
-                 neg_iou_thr=0.5,
-                 min_pos_iou=0.5,
-                 match_low_quality=False,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=512,
-                 pos_fraction=0.25,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=True),
-             pos_weight=-1,
-             debug=False)),
-     test_cfg=dict(
-         rcnn=dict(
-             score_thr=0.05,
-             nms=dict(type='nms', iou_threshold=0.5),
-             max_per_img=100)))
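
Reviewer note: configs like this one are plain Python files consumed through mmcv's Config
loader; a minimal reading sketch (the relative path is an assumption about the local checkout):

    from mmcv import Config

    cfg = Config.fromfile('configs/_base_/models/fast_rcnn_r50_fpn.py')
    print(cfg.model.roi_head.bbox_head.num_classes)  # -> 80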
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_512x512_160k_ade20k.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './fcn_r50-d8_512x512_160k_ade20k.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
- _base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py'
- model = dict(
-     pretrained='open-mmlab://msra/hrnetv2_w18_small',
-     backbone=dict(
-         extra=dict(
-             stage1=dict(num_blocks=(2, )),
-             stage2=dict(num_blocks=(2, 2)),
-             stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
-             stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
 
 
spaces/Anni123/AuRoRA/retrieval_utils.py DELETED
@@ -1,248 +0,0 @@
- '''
- Modified from https://github.com/RuochenZhao/Verify-and-Edit
- '''
-
- import wikipedia
- import wikipediaapi
- import spacy
- import numpy as np
- import ngram
- #import nltk
- import torch
- import sklearn
- #from textblob import TextBlob
- from nltk import tokenize
- from sentence_transformers import SentenceTransformer
- from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer, DPRContextEncoder, DPRContextEncoderTokenizer
- from llm_utils import decoder_for_gpt3
- from utils import entity_cleansing, knowledge_cleansing
- import nltk
- nltk.download('punkt')
-
- wiki_wiki = wikipediaapi.Wikipedia('en')
- nlp = spacy.load("en_core_web_sm")
- ENT_TYPE = ['EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'NORP', 'ORG', 'PERSON', 'PRODUCT', 'WORK_OF_ART']
-
- CTX_ENCODER = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
- CTX_TOKENIZER = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", model_max_length=512)
- Q_ENCODER = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
- Q_TOKENIZER = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base", model_max_length=512)
-
-
- ## todo: extract entities from ConceptNet
- def find_ents(text, engine):
-     doc = nlp(text)
-     valid_ents = []
-     for ent in doc.ents:
-         if ent.label_ in ENT_TYPE:
-             valid_ents.append(ent.text)
-     # in case the entity list is empty: resort to the LLM to extract entities
-     if valid_ents == []:
-         input = "Question: " + "[ " + text + "]\n"
-         input += "Output the entities in Question separated by comma: "
-         response = decoder_for_gpt3(input, 32, engine=engine)
-         valid_ents = entity_cleansing(response)
-     return valid_ents
-
-
- def relevant_pages_for_ents(valid_ents, topk=5):
-     '''
-     Input: a list of valid entities
-     Output: a list of lists containing the topk pages for each entity
-     '''
-     if valid_ents == []:
-         return []
-     titles = []
-     for ve in valid_ents:
-         title = wikipedia.search(ve)[:topk]
-         titles.append(title)
-     #titles = list(dict.fromkeys(titles))
-     return titles
-
-
- def relevant_pages_for_text(text, topk=5):
-     return wikipedia.search(text)[:topk]
-
-
- def get_wiki_objs(pages):
-     '''
-     Input: a list of lists of titles
-     Output: a list of lists of page objects
-     '''
-     if pages == []:
-         return []
-     obj_pages = []
-     for titles_for_ve in pages:
-         pages_for_ve = [wiki_wiki.page(title) for title in titles_for_ve]
-         obj_pages.append(pages_for_ve)
-     return obj_pages
-
-
- def get_linked_pages(wiki_pages, topk=5):
-     linked_ents = []
-     for wp in wiki_pages:
-         linked_ents += list(wp.links.values())
-     if topk != -1:
-         linked_ents = linked_ents[:topk]
-     return linked_ents
-
-
- def get_texts_to_pages(pages, topk=2):
-     '''
-     Input: a list of lists of pages
-     Output: a list of lists of texts
-     '''
-     total_texts = []
-     for ve_pages in pages:
-         ve_texts = []
-         for p in ve_pages:
-             text = p.text
-             text = tokenize.sent_tokenize(text)[:topk]
-             text = ' '.join(text)
-             ve_texts.append(text)
-         total_texts.append(ve_texts)
-     return total_texts
-
-
- def DPR_embeddings(q_encoder, q_tokenizer, question):
-     question_embedding = q_tokenizer(question, return_tensors="pt", max_length=5, truncation=True)
-     with torch.no_grad():
-         try:
-             question_embedding = q_encoder(**question_embedding)[0][0]
-         except:
-             print(question)
-             print(question_embedding['input_ids'].size())
-             raise Exception('end')
-     question_embedding = question_embedding.numpy()
-     return question_embedding
-
- def model_embeddings(sentence, model):
-     embedding = model.encode([sentence])
-     return embedding[0]  # should return an array of shape 384
-
- ## todo: plus overlap filtering
- def filtering_retrieved_texts(question, ent_texts, retr_method="wikipedia_dpr", topk=1):
-     filtered_texts = []
-     for texts in ent_texts:
-         if texts != []:  # not an empty list
-             if retr_method == "ngram":
-                 pars = np.array([ngram.NGram.compare(question, sent, N=1) for sent in texts])
-                 # argsort: smallest to biggest
-                 pars = pars.argsort()[::-1][:topk]
-             else:
-                 if retr_method == "wikipedia_dpr":
-                     sen_embeds = [DPR_embeddings(Q_ENCODER, Q_TOKENIZER, question)]
-                     par_embeds = [DPR_embeddings(CTX_ENCODER, CTX_TOKENIZER, s) for s in texts]
-                 else:
-                     embedding_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
-                     sen_embeds = [model_embeddings(question, embedding_model)]
-                     par_embeds = [model_embeddings(s, embedding_model) for s in texts]
-                 pars = sklearn.metrics.pairwise.pairwise_distances(sen_embeds, par_embeds)
-                 pars = pars.argsort(axis=1)[0][:topk]
-             filtered_texts += [texts[i] for i in pars]
-     filtered_texts = list(dict.fromkeys(filtered_texts))
-     return filtered_texts
-
- def join_knowledge(filtered_texts):
-     if filtered_texts == []:
-         return ""
-     return " ".join(filtered_texts)
-
- def retrieve_for_question_kb(question, engine, know_type="entity_know", no_links=False):
-     valid_ents = find_ents(question, engine)
-     print(valid_ents)
-
-     # find pages
-     page_titles = []
-     if "entity" in know_type:
-         pages_for_ents = relevant_pages_for_ents(valid_ents, topk=5)  # list of lists
-         if pages_for_ents != []:
-             page_titles += pages_for_ents
-     if "question" in know_type:
-         pages_for_question = relevant_pages_for_text(question, topk=5)
-         if pages_for_question != []:
-             page_titles += pages_for_question
-     pages = get_wiki_objs(page_titles)  # list of lists
-     if pages == []:
-         return ""
-     new_pages = []
-     assert page_titles != []
-     assert pages != []
-
-     print(page_titles)
-     #print(pages)
-     for i, ve_pt in enumerate(page_titles):
-         new_ve_pages = []
-         for j, pt in enumerate(ve_pt):
-             if 'disambiguation' in pt:
-                 new_ve_pages += get_linked_pages([pages[i][j]], topk=-1)
-             else:
-                 new_ve_pages += [pages[i][j]]
-         new_pages.append(new_ve_pages)
-
-     pages = new_pages
-
-     if not no_links:
-         # add linked pages
-         for ve_pages in pages:
-             ve_pages += get_linked_pages(ve_pages, topk=5)
-             ve_pages = list(dict.fromkeys(ve_pages))
-     # get texts
-     texts = get_texts_to_pages(pages, topk=1)
-     filtered_texts = filtering_retrieved_texts(question, texts)
-     joint_knowledge = join_knowledge(filtered_texts)
-
-     return valid_ents, joint_knowledge
-
- def retrieve_for_question(question, engine, retrieve_source="llm_kb"):
-     # Retrieve knowledge from the LLM
-     if "llm" in retrieve_source:
-         self_retrieve_prompt = "Question: " + "[ " + question + "]\n"
-         self_retrieve_prompt += "Necessary knowledge about the question by not answering the question: "
-         self_retrieve_knowledge = decoder_for_gpt3(self_retrieve_prompt, 256, engine=engine)
-         self_retrieve_knowledge = knowledge_cleansing(self_retrieve_knowledge)
-         print("------Self_Know------")
-         print(self_retrieve_knowledge)
-
-     # Retrieve knowledge from the KB
-     if "kb" in retrieve_source:
-         entities, kb_retrieve_knowledge = retrieve_for_question_kb(question, engine, no_links=True)
-         if kb_retrieve_knowledge != "":
-             print("------KB_Know------")
-             print(kb_retrieve_knowledge)
-
-     return entities, self_retrieve_knowledge, kb_retrieve_knowledge
-
- def refine_for_question(question, engine, self_retrieve_knowledge, kb_retrieve_knowledge, retrieve_source="llm_kb"):
-
-     # Refine knowledge
-     if retrieve_source == "llm_only":
-         refine_knowledge = self_retrieve_knowledge
-     elif retrieve_source == "kb_only":
-         if kb_retrieve_knowledge != "":
-             refine_prompt = "Question: " + "[ " + question + "]\n"
-             refine_prompt += "Knowledge: " + "[ " + kb_retrieve_knowledge + "]\n"
-             refine_prompt += "Based on Knowledge, output the brief and refined knowledge necessary for Question by not giving the answer: "
-             refine_knowledge = decoder_for_gpt3(refine_prompt, 256, engine=engine)
-             print("------Refined_Know------")
-             print(refine_knowledge)
-         else:
-             refine_knowledge = ""
-     elif retrieve_source == "llm_kb":
-         if kb_retrieve_knowledge != "":
-             #refine_prompt = "Question: " + "[ " + question + "]\n"
-             refine_prompt = "Knowledge_1: " + "[ " + self_retrieve_knowledge + "]\n"
-             refine_prompt += "Knowledge_2: " + "[ " + kb_retrieve_knowledge + "]\n"
-             #refine_prompt += "By using Knowledge_2 to check Knowledge_1, output the brief and correct knowledge necessary for Question: "
-             refine_prompt += "By using Knowledge_2 to check Knowledge_1, output the brief and correct knowledge: "
-             refine_knowledge = decoder_for_gpt3(refine_prompt, 256, engine=engine)
-             refine_knowledge = knowledge_cleansing(refine_knowledge)
-             #refine_knowledge = kb_retrieve_knowledge + refine_knowledge
-             print("------Refined_Know------")
-             print(refine_knowledge)
-         else:
-             refine_knowledge = self_retrieve_knowledge
-
-     return refine_knowledge
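
Reviewer note: a minimal sketch of how this module's entry points compose, using the function
signatures defined above (the engine string is a hypothetical placeholder):

    question = "Who wrote The Old Man and the Sea?"
    engine = "text-davinci-003"  # hypothetical engine id

    entities, self_know, kb_know = retrieve_for_question(question, engine, retrieve_source="llm_kb")
    knowledge = refine_for_question(question, engine, self_know, kb_know, retrieve_source="llm_kb")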
 
 
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/utils/fft_pytorch.py DELETED
@@ -1,73 +0,0 @@
- #!/usr/bin/python
- #****************************************************************#
- # ScriptName: fft_pytorch.py
- # Author: Anonymous_123
- # Create Date: 2022-08-15 11:33
- # Modify Author: Anonymous_123
- # Modify Date: 2022-08-18 17:46
- # Function:
- #***************************************************************#
-
- import torch
- import torch.nn as nn
- import torch.fft as fft
- import cv2
- import numpy as np
- import torchvision.transforms as transforms
- from PIL import Image
-
-
- def lowpass(input, limit):
-     pass1 = torch.abs(fft.rfftfreq(input.shape[-1])) < limit
-     pass2 = torch.abs(fft.fftfreq(input.shape[-2])) < limit
-     kernel = torch.outer(pass2, pass1)
-     fft_input = fft.rfft2(input)
-     return fft.irfft2(fft_input * kernel, s=input.shape[-2:])
-
- class HighFrequencyLoss(nn.Module):
-     def __init__(self, size=(224, 224)):
-         super(HighFrequencyLoss, self).__init__()
-         '''
-         self.h, self.w = size
-         self.lpf = torch.zeros((self.h, 1))
-         R = (self.h + self.w) // 8
-         for x in range(self.w):
-             for y in range(self.h):
-                 if ((x - (self.w - 1) / 2) ** 2 + (y - (self.h - 1) / 2) ** 2) < (R ** 2):
-                     self.lpf[y, x] = 1
-         self.hpf = 1 - self.lpf
-         '''
-
-     def forward(self, x):
-         f = fft.fftn(x, dim=(2, 3))
-         loss = torch.abs(f).mean()
-
-         # f = torch.roll(f, (self.h // 2, self.w // 2), dims=(2, 3))
-         # f_l = torch.mean(f * self.lpf)
-         # f_h = torch.mean(f * self.hpf)
-
-         return loss
-
- if __name__ == '__main__':
-     import pdb
-     pdb.set_trace()
-     HF = HighFrequencyLoss()
-     transform = transforms.Compose([transforms.ToTensor()])
-
-     # img = cv2.imread('test_imgs/ILSVRC2012_val_00001935.JPEG')
-     img = cv2.imread('../tmp.jpg')
-     H, W, C = img.shape
-     imgs = []
-     for i in range(10):
-         img_ = img[:, 224 * i:224 * (i + 1), :]
-         print(img_.shape)
-         img_tensor = transform(Image.fromarray(img_[:, :, ::-1])).unsqueeze(0)
-         loss = HF(img_tensor).item()
-         cv2.putText(img_, str(loss)[:6], (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
-         imgs.append(img_)
-
-     cv2.imwrite('tmp.jpg', cv2.hconcat(imgs))
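
Reviewer note: `lowpass` above builds a separable frequency mask from rfftfreq/fftfreq and
applies it in the Fourier domain. A minimal call sketch under an assumed input shape:

    import torch

    x = torch.randn(1, 3, 224, 224)  # hypothetical image batch
    y = lowpass(x, limit=0.1)        # keep only frequencies below 0.1 cycles/pixel
    print(y.shape)                   # torch.Size([1, 3, 224, 224])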
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roi_align.py DELETED
@@ -1,223 +0,0 @@
- # Copyright (c) OpenMMLab. All rights reserved.
- import torch
- import torch.nn as nn
- from torch.autograd import Function
- from torch.autograd.function import once_differentiable
- from torch.nn.modules.utils import _pair
-
- from ..utils import deprecated_api_warning, ext_loader
-
- ext_module = ext_loader.load_ext('_ext',
-                                  ['roi_align_forward', 'roi_align_backward'])
-
-
- class RoIAlignFunction(Function):
-
-     @staticmethod
-     def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
-                  pool_mode, aligned):
-         from ..onnx import is_custom_op_loaded
-         has_custom_op = is_custom_op_loaded()
-         if has_custom_op:
-             return g.op(
-                 'mmcv::MMCVRoiAlign',
-                 input,
-                 rois,
-                 output_height_i=output_size[0],
-                 output_width_i=output_size[1],
-                 spatial_scale_f=spatial_scale,
-                 sampling_ratio_i=sampling_ratio,
-                 mode_s=pool_mode,
-                 aligned_i=aligned)
-         else:
-             from torch.onnx.symbolic_opset9 import sub, squeeze
-             from torch.onnx.symbolic_helper import _slice_helper
-             from torch.onnx import TensorProtoDataType
-             # batch_indices = rois[:, 0].long()
-             batch_indices = _slice_helper(
-                 g, rois, axes=[1], starts=[0], ends=[1])
-             batch_indices = squeeze(g, batch_indices, 1)
-             batch_indices = g.op(
-                 'Cast', batch_indices, to_i=TensorProtoDataType.INT64)
-             # rois = rois[:, 1:]
-             rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
-             if aligned:
-                 # rois -= 0.5/spatial_scale
-                 aligned_offset = g.op(
-                     'Constant',
-                     value_t=torch.tensor([0.5 / spatial_scale],
-                                          dtype=torch.float32))
-                 rois = sub(g, rois, aligned_offset)
-             # roi align
-             return g.op(
-                 'RoiAlign',
-                 input,
-                 rois,
-                 batch_indices,
-                 output_height_i=output_size[0],
-                 output_width_i=output_size[1],
-                 spatial_scale_f=spatial_scale,
-                 sampling_ratio_i=max(0, sampling_ratio),
-                 mode_s=pool_mode)
-
-     @staticmethod
-     def forward(ctx,
-                 input,
-                 rois,
-                 output_size,
-                 spatial_scale=1.0,
-                 sampling_ratio=0,
-                 pool_mode='avg',
-                 aligned=True):
-         ctx.output_size = _pair(output_size)
-         ctx.spatial_scale = spatial_scale
-         ctx.sampling_ratio = sampling_ratio
-         assert pool_mode in ('max', 'avg')
-         ctx.pool_mode = 0 if pool_mode == 'max' else 1
-         ctx.aligned = aligned
-         ctx.input_shape = input.size()
-
-         assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
-
-         output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
-                         ctx.output_size[1])
-         output = input.new_zeros(output_shape)
-         if ctx.pool_mode == 0:
-             argmax_y = input.new_zeros(output_shape)
-             argmax_x = input.new_zeros(output_shape)
-         else:
-             argmax_y = input.new_zeros(0)
-             argmax_x = input.new_zeros(0)
-
-         ext_module.roi_align_forward(
-             input,
-             rois,
-             output,
-             argmax_y,
-             argmax_x,
-             aligned_height=ctx.output_size[0],
-             aligned_width=ctx.output_size[1],
-             spatial_scale=ctx.spatial_scale,
-             sampling_ratio=ctx.sampling_ratio,
-             pool_mode=ctx.pool_mode,
-             aligned=ctx.aligned)
-
-         ctx.save_for_backward(rois, argmax_y, argmax_x)
-         return output
-
-     @staticmethod
-     @once_differentiable
-     def backward(ctx, grad_output):
-         rois, argmax_y, argmax_x = ctx.saved_tensors
-         grad_input = grad_output.new_zeros(ctx.input_shape)
-         # complex head architectures may leave grad_output non-contiguous.
-         grad_output = grad_output.contiguous()
-         ext_module.roi_align_backward(
-             grad_output,
-             rois,
-             argmax_y,
-             argmax_x,
-             grad_input,
-             aligned_height=ctx.output_size[0],
-             aligned_width=ctx.output_size[1],
-             spatial_scale=ctx.spatial_scale,
-             sampling_ratio=ctx.sampling_ratio,
-             pool_mode=ctx.pool_mode,
-             aligned=ctx.aligned)
-         return grad_input, None, None, None, None, None, None
-
-
- roi_align = RoIAlignFunction.apply
-
-
- class RoIAlign(nn.Module):
-     """RoI align pooling layer.
-
-     Args:
-         output_size (tuple): h, w
-         spatial_scale (float): scale the input boxes by this number
-         sampling_ratio (int): number of input samples to take for each
-             output sample. 0 to take samples densely for current models.
-         pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
-         aligned (bool): if False, use the legacy implementation in
-             MMDetection. If True, align the results more perfectly.
-         use_torchvision (bool): whether to use roi_align from torchvision.
-
-     Note:
-         The implementation of RoIAlign when aligned=True is modified from
-         https://github.com/facebookresearch/detectron2/
-
-         The meaning of aligned=True:
-
-         Given a continuous coordinate c, its two neighboring pixel
-         indices (in our pixel model) are computed by floor(c - 0.5) and
-         ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
-         indices [0] and [1] (which are sampled from the underlying signal
-         at continuous coordinates 0.5 and 1.5). But the original roi_align
-         (aligned=False) does not subtract the 0.5 when computing
-         neighboring pixel indices and therefore it uses pixels with a
-         slightly incorrect alignment (relative to our pixel model) when
-         performing bilinear interpolation.
-
-         With `aligned=True`,
-         we first appropriately scale the ROI and then shift it by -0.5
-         prior to calling roi_align. This produces the correct neighbors.
-
-         The difference does not affect the model's performance if
-         RoIAlign is used together with conv layers.
-     """
-
-     @deprecated_api_warning(
-         {
-             'out_size': 'output_size',
-             'sample_num': 'sampling_ratio'
-         },
-         cls_name='RoIAlign')
-     def __init__(self,
-                  output_size,
-                  spatial_scale=1.0,
-                  sampling_ratio=0,
-                  pool_mode='avg',
-                  aligned=True,
-                  use_torchvision=False):
-         super(RoIAlign, self).__init__()
-
-         self.output_size = _pair(output_size)
-         self.spatial_scale = float(spatial_scale)
-         self.sampling_ratio = int(sampling_ratio)
-         self.pool_mode = pool_mode
-         self.aligned = aligned
-         self.use_torchvision = use_torchvision
-
-     def forward(self, input, rois):
-         """
-         Args:
-             input: NCHW images
-             rois: Bx5 boxes. First column is the index into N.\
-                 The other 4 columns are xyxy.
-         """
-         if self.use_torchvision:
-             from torchvision.ops import roi_align as tv_roi_align
-             if 'aligned' in tv_roi_align.__code__.co_varnames:
-                 return tv_roi_align(input, rois, self.output_size,
-                                     self.spatial_scale, self.sampling_ratio,
-                                     self.aligned)
-             else:
-                 if self.aligned:
-                     rois -= rois.new_tensor([0.] +
-                                             [0.5 / self.spatial_scale] * 4)
-                 return tv_roi_align(input, rois, self.output_size,
-                                     self.spatial_scale, self.sampling_ratio)
-         else:
-             return roi_align(input, rois, self.output_size, self.spatial_scale,
-                              self.sampling_ratio, self.pool_mode, self.aligned)
-
-     def __repr__(self):
-         s = self.__class__.__name__
-         s += f'(output_size={self.output_size}, '
-         s += f'spatial_scale={self.spatial_scale}, '
-         s += f'sampling_ratio={self.sampling_ratio}, '
-         s += f'pool_mode={self.pool_mode}, '
-         s += f'aligned={self.aligned}, '
-         s += f'use_torchvision={self.use_torchvision})'
-         return s
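
Reviewer note: a minimal usage sketch of the layer above; rois follow the (batch_index, x1, y1,
x2, y2) convention asserted in the forward pass, while the feature map and stride here are
assumptions for illustration:

    import torch

    feat = torch.randn(2, 256, 64, 64)             # hypothetical stride-4 feature map
    rois = torch.tensor([[0., 8., 8., 120., 120.],
                         [1., 0., 0., 60., 40.]])  # boxes in image coordinates
    layer = RoIAlign(output_size=7, spatial_scale=1 / 4, sampling_ratio=0, aligned=True)
    pooled = layer(feat, rois)                     # -> shape (2, 256, 7, 7)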
 
 
spaces/Ariharasudhan/YoloV5/utils/loggers/comet/hpo.py DELETED
@@ -1,118 +0,0 @@
- import argparse
- import json
- import logging
- import os
- import sys
- from pathlib import Path
-
- import comet_ml
-
- logger = logging.getLogger(__name__)
-
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[3]  # YOLOv5 root directory
- if str(ROOT) not in sys.path:
-     sys.path.append(str(ROOT))  # add ROOT to PATH
-
- from train import train
- from utils.callbacks import Callbacks
- from utils.general import increment_path
- from utils.torch_utils import select_device
-
- # Project Configuration
- config = comet_ml.config.get_config()
- COMET_PROJECT_NAME = config.get_string(os.getenv("COMET_PROJECT_NAME"), "comet.project_name", default="yolov5")
-
-
- def get_args(known=False):
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path')
-     parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
-     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
-     parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path')
-     parser.add_argument('--epochs', type=int, default=300, help='total training epochs')
-     parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch')
-     parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)')
-     parser.add_argument('--rect', action='store_true', help='rectangular training')
-     parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
-     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
-     parser.add_argument('--noval', action='store_true', help='only validate final epoch')
-     parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor')
-     parser.add_argument('--noplots', action='store_true', help='save no plot files')
-     parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations')
-     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
-     parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"')
-     parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
-     parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
-     parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer')
-     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
-     parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)')
-     parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name')
-     parser.add_argument('--name', default='exp', help='save to project/name')
-     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-     parser.add_argument('--quad', action='store_true', help='quad dataloader')
-     parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler')
-     parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
-     parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)')
-     parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2')
-     parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)')
-     parser.add_argument('--seed', type=int, default=0, help='Global training seed')
-     parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify')
-
-     # Weights & Biases arguments
-     parser.add_argument('--entity', default=None, help='W&B: Entity')
-     parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option')
-     parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval')
-     parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use')
-
-     # Comet Arguments
-     parser.add_argument("--comet_optimizer_config", type=str, help="Comet: Path to a Comet Optimizer Config File.")
-     parser.add_argument("--comet_optimizer_id", type=str, help="Comet: ID of the Comet Optimizer sweep.")
-     parser.add_argument("--comet_optimizer_objective", type=str, help="Comet: Set to 'minimize' or 'maximize'.")
-     parser.add_argument("--comet_optimizer_metric", type=str, help="Comet: Metric to Optimize.")
-     parser.add_argument("--comet_optimizer_workers",
-                         type=int,
-                         default=1,
-                         help="Comet: Number of Parallel Workers to use with the Comet Optimizer.")
-
-     return parser.parse_known_args()[0] if known else parser.parse_args()
-
-
- def run(parameters, opt):
-     hyp_dict = {k: v for k, v in parameters.items() if k not in ["epochs", "batch_size"]}
-
-     opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve))
-     opt.batch_size = parameters.get("batch_size")
-     opt.epochs = parameters.get("epochs")
-
-     device = select_device(opt.device, batch_size=opt.batch_size)
-     train(hyp_dict, opt, device, callbacks=Callbacks())
-
-
- if __name__ == "__main__":
-     opt = get_args(known=True)
-
-     opt.weights = str(opt.weights)
-     opt.cfg = str(opt.cfg)
-     opt.data = str(opt.data)
-     opt.project = str(opt.project)
-
-     optimizer_id = os.getenv("COMET_OPTIMIZER_ID")
-     if optimizer_id is None:
-         with open(opt.comet_optimizer_config) as f:
-             optimizer_config = json.load(f)
-         optimizer = comet_ml.Optimizer(optimizer_config)
-     else:
-         optimizer = comet_ml.Optimizer(optimizer_id)
-
-     opt.comet_optimizer_id = optimizer.id
-     status = optimizer.status()
-
-     opt.comet_optimizer_objective = status["spec"]["objective"]
-     opt.comet_optimizer_metric = status["spec"]["metric"]
-
-     logger.info("COMET INFO: Starting Hyperparameter Sweep")
-     for parameter in optimizer.get_parameters():
-         run(parameter["parameters"], opt)
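
Reviewer note: the script expects a Comet Optimizer sweep config. A hypothetical minimal
example, consistent with the fields the script reads (status["spec"]["objective"] and
["spec"]["metric"]) but otherwise an assumption about Comet's sweep schema:

    import comet_ml

    optimizer_config = {
        "algorithm": "bayes",
        "spec": {"objective": "maximize", "metric": "metrics/mAP_0.5"},
        "parameters": {
            "lr0": {"type": "float", "min": 1e-5, "max": 1e-1},
            "epochs": {"type": "discrete", "values": [100, 200, 300]},
        },
    }
    optimizer = comet_ml.Optimizer(optimizer_config)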
 
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/inpaint_zoom/zoom_in_app.py DELETED
@@ -1,186 +0,0 @@
- import os
-
- import gradio as gr
- import numpy as np
- import torch
- from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
- from PIL import Image
-
- from video_diffusion.inpaint_zoom.utils.zoom_in_utils import dummy, image_grid, shrink_and_paste_on_blank, write_video
-
- os.environ["CUDA_VISIBLE_DEVICES"] = "0"
-
-
- stable_paint_model_list = ["stabilityai/stable-diffusion-2-inpainting", "runwayml/stable-diffusion-inpainting"]
-
- stable_paint_prompt_list = [
-     "children running in the forest , sunny, bright, by studio ghibli painting, superior quality, masterpiece, traditional Japanese colors, by Grzegorz Rutkowski, concept art",
-     "A beautiful landscape of a mountain range with a lake in the foreground",
- ]
-
- stable_paint_negative_prompt_list = [
-     "blurry, bad art, blurred, text, watermark",
- ]
-
-
- class StableDiffusionZoomIn:
-     def __init__(self):
-         self.pipe = None
-
-     def load_model(self, model_id):
-         if self.pipe is None:
-             self.pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16")
-             self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
-             self.pipe = self.pipe.to("cuda")
-             self.pipe.safety_checker = dummy
-             self.pipe.enable_attention_slicing()
-             self.pipe.enable_xformers_memory_efficient_attention()
-             self.g_cuda = torch.Generator(device="cuda")
-
-         return self.pipe
-
-     def generate_video(
-         self,
-         model_id,
-         prompt,
-         negative_prompt,
-         guidance_scale,
-         num_inference_steps,
-     ):
-         pipe = self.load_model(model_id)
-
-         num_init_images = 2
-         seed = 42
-         height = 512
-         width = height
-
-         current_image = Image.new(mode="RGBA", size=(height, width))
-         mask_image = np.array(current_image)[:, :, 3]
-         mask_image = Image.fromarray(255 - mask_image).convert("RGB")
-         current_image = current_image.convert("RGB")
-
-         init_images = pipe(
-             prompt=[prompt] * num_init_images,
-             negative_prompt=[negative_prompt] * num_init_images,
-             image=current_image,
-             guidance_scale=guidance_scale,
-             height=height,
-             width=width,
-             generator=self.g_cuda.manual_seed(seed),
-             mask_image=mask_image,
-             num_inference_steps=num_inference_steps,
-         )[0]
-
-         image_grid(init_images, rows=1, cols=num_init_images)
-
-         init_image_selected = 1  # @param
-         if num_init_images == 1:
-             init_image_selected = 0
-         else:
-             init_image_selected = init_image_selected - 1
-
-         num_outpainting_steps = 20  # @param
-         mask_width = 128  # @param
-         num_interpol_frames = 30  # @param
-
-         current_image = init_images[init_image_selected]
-         all_frames = []
-         all_frames.append(current_image)
-
-         for i in range(num_outpainting_steps):
-             print("Generating image: " + str(i + 1) + " / " + str(num_outpainting_steps))
-
-             prev_image_fix = current_image
-
-             prev_image = shrink_and_paste_on_blank(current_image, mask_width)
-
-             current_image = prev_image
-
-             # create mask (black image with white mask_width width edges)
-             mask_image = np.array(current_image)[:, :, 3]
-             mask_image = Image.fromarray(255 - mask_image).convert("RGB")
-
-             # inpainting step
-             current_image = current_image.convert("RGB")
-             images = pipe(
-                 prompt=prompt,
-                 negative_prompt=negative_prompt,
-                 image=current_image,
-                 guidance_scale=guidance_scale,
-                 height=height,
-                 width=width,
-                 # this can make the whole thing deterministic but the output less exciting
-                 # generator = g_cuda.manual_seed(seed),
-                 mask_image=mask_image,
-                 num_inference_steps=num_inference_steps,
-             )[0]
-             current_image = images[0]
-             current_image.paste(prev_image, mask=prev_image)
-
-             # interpolation steps between 2 inpainted images (= sequential zoom and crop)
-             for j in range(num_interpol_frames - 1):
-                 interpol_image = current_image
-                 interpol_width = round(
-                     (1 - (1 - 2 * mask_width / height) ** (1 - (j + 1) / num_interpol_frames)) * height / 2
-                 )
-                 interpol_image = interpol_image.crop(
-                     (interpol_width, interpol_width, width - interpol_width, height - interpol_width)
-                 )
-
-                 interpol_image = interpol_image.resize((height, width))
-
-                 # paste the higher resolution previous image in the middle to avoid drop in quality caused by zooming
-                 interpol_width2 = round((1 - (height - 2 * mask_width) / (height - 2 * interpol_width)) / 2 * height)
-                 prev_image_fix_crop = shrink_and_paste_on_blank(prev_image_fix, interpol_width2)
-                 interpol_image.paste(prev_image_fix_crop, mask=prev_image_fix_crop)
-
-                 all_frames.append(interpol_image)
-
-             all_frames.append(current_image)
-
-         video_file_name = "infinite_zoom_out"
-         fps = 30
-         save_path = video_file_name + ".mp4"
-         write_video(save_path, all_frames, fps)
-         return save_path
-
-     def app():
-         with gr.Blocks():
-             with gr.Row():
-                 with gr.Column():
-                     text2image_in_model_path = gr.Dropdown(
-                         choices=stable_paint_model_list, value=stable_paint_model_list[0], label="Text-Image Model Id"
-                     )
-
-                     text2image_in_prompt = gr.Textbox(lines=2, value=stable_paint_prompt_list[0], label="Prompt")
-
-                     text2image_in_negative_prompt = gr.Textbox(
-                         lines=1, value=stable_paint_negative_prompt_list[0], label="Negative Prompt"
-                     )
-
-                     with gr.Row():
-                         with gr.Column():
-                             text2image_in_guidance_scale = gr.Slider(
-                                 minimum=0.1, maximum=15, step=0.1, value=7.5, label="Guidance Scale"
-                             )
-
-                             text2image_in_num_inference_step = gr.Slider(
-                                 minimum=1, maximum=100, step=1, value=50, label="Num Inference Step"
-                             )
-
-                     text2image_in_predict = gr.Button(value="Generator")
-
-                 with gr.Column():
-                     output_image = gr.Video(label="Output")
-
-             text2image_in_predict.click(
-                 fn=StableDiffusionZoomIn().generate_video,
-                 inputs=[
-                     text2image_in_model_path,
-                     text2image_in_prompt,
-                     text2image_in_negative_prompt,
-                     text2image_in_guidance_scale,
-                     text2image_in_num_inference_step,
-                 ],
-                 outputs=output_image,
-             )
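
Reviewer note: the inner loop's crop width implements a constant-rate zoom between two
outpainted keyframes. The formula in isolation, using the constants from the file:

    height = 512
    mask_width = 128
    num_interpol_frames = 30

    keep = 1 - 2 * mask_width / height  # fraction of the frame kept per outpainting step (0.5)
    for j in range(num_interpol_frames - 1):
        interpol_width = round((1 - keep ** (1 - (j + 1) / num_interpol_frames)) * height / 2)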
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/exceptions.py DELETED
@@ -1,267 +0,0 @@
- # exceptions.py
-
- import re
- import sys
- import typing
-
- from .util import col, line, lineno, _collapse_string_to_ranges
- from .unicode import pyparsing_unicode as ppu
-
-
- class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
-     pass
-
-
- _extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
- _exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
-
-
- class ParseBaseException(Exception):
-     """base exception class for all parsing runtime exceptions"""
-
-     # Performance tuning: we construct a *lot* of these, so keep this
-     # constructor as small and fast as possible
-     def __init__(
-         self,
-         pstr: str,
-         loc: int = 0,
-         msg: typing.Optional[str] = None,
-         elem=None,
-     ):
-         self.loc = loc
-         if msg is None:
-             self.msg = pstr
-             self.pstr = ""
-         else:
-             self.msg = msg
-             self.pstr = pstr
-         self.parser_element = self.parserElement = elem
-         self.args = (pstr, loc, msg)
-
-     @staticmethod
-     def explain_exception(exc, depth=16):
-         """
-         Method to take an exception and translate the Python internal traceback into a list
-         of the pyparsing expressions that caused the exception to be raised.
-
-         Parameters:
-
-         - exc - exception raised during parsing (need not be a ParseException, in support
-           of Python exceptions that might be raised in a parse action)
-         - depth (default=16) - number of levels back in the stack trace to list expression
-           and function names; if None, the full stack trace names will be listed; if 0, only
-           the failing input line, marker, and exception string will be shown
-
-         Returns a multi-line string listing the ParserElements and/or function names in the
-         exception's stack trace.
-         """
-         import inspect
-         from .core import ParserElement
-
-         if depth is None:
-             depth = sys.getrecursionlimit()
-         ret = []
-         if isinstance(exc, ParseBaseException):
-             ret.append(exc.line)
-             ret.append(" " * (exc.column - 1) + "^")
-         ret.append("{}: {}".format(type(exc).__name__, exc))
-
-         if depth > 0:
-             callers = inspect.getinnerframes(exc.__traceback__, context=depth)
-             seen = set()
-             for i, ff in enumerate(callers[-depth:]):
-                 frm = ff[0]
-
-                 f_self = frm.f_locals.get("self", None)
-                 if isinstance(f_self, ParserElement):
-                     if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
-                         continue
-                     if id(f_self) in seen:
-                         continue
-                     seen.add(id(f_self))
-
-                     self_type = type(f_self)
-                     ret.append(
-                         "{}.{} - {}".format(
-                             self_type.__module__, self_type.__name__, f_self
-                         )
-                     )
-
-                 elif f_self is not None:
-                     self_type = type(f_self)
-                     ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
-
-                 else:
-                     code = frm.f_code
-                     if code.co_name in ("wrapper", "<module>"):
-                         continue
-
-                     ret.append("{}".format(code.co_name))
-
-                 depth -= 1
-                 if not depth:
-                     break
-
-         return "\n".join(ret)
-
-     @classmethod
-     def _from_exception(cls, pe):
-         """
-         internal factory method to simplify creating one type of ParseException
-         from another - avoids having __init__ signature conflicts among subclasses
-         """
-         return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
-     @property
-     def line(self) -> str:
-         """
-         Return the line of text where the exception occurred.
-         """
-         return line(self.loc, self.pstr)
-
-     @property
-     def lineno(self) -> int:
-         """
-         Return the 1-based line number of text where the exception occurred.
-         """
-         return lineno(self.loc, self.pstr)
-
-     @property
-     def col(self) -> int:
-         """
-         Return the 1-based column on the line of text where the exception occurred.
-         """
-         return col(self.loc, self.pstr)
-
-     @property
-     def column(self) -> int:
-         """
-         Return the 1-based column on the line of text where the exception occurred.
-         """
-         return col(self.loc, self.pstr)
-
-     def __str__(self) -> str:
-         if self.pstr:
-             if self.loc >= len(self.pstr):
-                 foundstr = ", found end of text"
-             else:
-                 # pull out next word at error location
-                 found_match = _exception_word_extractor.match(self.pstr, self.loc)
-                 if found_match is not None:
-                     found = found_match.group(0)
-                 else:
-                     found = self.pstr[self.loc : self.loc + 1]
-                 foundstr = (", found %r" % found).replace(r"\\", "\\")
-         else:
-             foundstr = ""
-         return "{}{} (at char {}), (line:{}, col:{})".format(
-             self.msg, foundstr, self.loc, self.lineno, self.column
-         )
-
-     def __repr__(self):
-         return str(self)
-
-     def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
-         """
-         Extracts the exception line from the input string, and marks
-         the location of the exception with a special symbol.
-         """
-         markerString = marker_string if marker_string is not None else markerString
-         line_str = self.line
-         line_column = self.column - 1
-         if markerString:
-             line_str = "".join(
-                 (line_str[:line_column], markerString, line_str[line_column:])
-             )
-         return line_str.strip()
-
-     def explain(self, depth=16) -> str:
-         """
-         Method to translate the Python internal traceback into a list
-         of the pyparsing expressions that caused the exception to be raised.
-
-         Parameters:
-
-         - depth (default=16) - number of levels back in the stack trace to list expression
-           and function names; if None, the full stack trace names will be listed; if 0, only
-           the failing input line, marker, and exception string will be shown
-
-         Returns a multi-line string listing the ParserElements and/or function names in the
-         exception's stack trace.
-
-         Example::
-
-             expr = pp.Word(pp.nums) * 3
-             try:
-                 expr.parse_string("123 456 A789")
-             except pp.ParseException as pe:
-                 print(pe.explain(depth=0))
-
-         prints::
-
-             123 456 A789
-                     ^
-             ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
-
-         Note: the diagnostic output will include string representations of the expressions
-         that failed to parse. These representations will be more helpful if you use `set_name` to
-         give identifiable names to your expressions. Otherwise they will use the default string
-         forms, which may be cryptic to read.
-
-         Note: pyparsing's default truncation of exception tracebacks may also truncate the
-         stack of expressions that are displayed in the ``explain`` output. To get the full listing
-         of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
-         """
-         return self.explain_exception(self, depth)
-
-     markInputline = mark_input_line
-
-
- class ParseException(ParseBaseException):
-     """
-     Exception thrown when a parse expression doesn't match the input string
-
-     Example::
-
-         try:
-             Word(nums).set_name("integer").parse_string("ABC")
-         except ParseException as pe:
-             print(pe)
-             print("column: {}".format(pe.column))
-
-     prints::
-
-         Expected integer (at char 0), (line:1, col:1)
-         column: 1
-
-     """
-
-
- class ParseFatalException(ParseBaseException):
-     """
-     User-throwable exception thrown when inconsistent parse content
-     is found; stops all parsing immediately
-     """
-
-
- class ParseSyntaxException(ParseFatalException):
-     """
-     Just like :class:`ParseFatalException`, but thrown internally
-     when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
-     that parsing is to stop immediately because an unbacktrackable
-     syntax error has been found.
-     """
-
-
- class RecursiveGrammarException(Exception):
-     """
-     Exception thrown by :class:`ParserElement.validate` if the
-     grammar could be left-recursive; parser may need to enable
-     left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
-     """
-
-     def __init__(self, parseElementList):
-         self.parseElementTrace = parseElementList
-
-     def __str__(self) -> str:
-         return "RecursiveGrammarException: {}".format(self.parseElementTrace)
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_deprecation_warning.py DELETED
@@ -1,7 +0,0 @@
- class SetuptoolsDeprecationWarning(Warning):
-     """
-     Base class for warning deprecations in ``setuptools``
-
-     This class is not derived from ``DeprecationWarning``, and as such is
-     visible by default.
-     """
 
 
spaces/Audiogen/vector-search-demo/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Vector Search Demo
- emoji: 💻
- colorFrom: green
- colorTo: red
- sdk: gradio
- sdk_version: 3.47.1
- app_file: app.py
- pinned: false
- license: unlicense
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/BetterAPI/BetterChat_new/src/routes/conversation/[id]/+server.ts DELETED
@@ -1,236 +0,0 @@
1
- import { PUBLIC_SEP_TOKEN } from "$env/static/public";
2
- import { buildPrompt } from "$lib/buildPrompt.js";
3
- import { abortedGenerations } from "$lib/server/abortedGenerations.js";
4
- import { collections } from "$lib/server/database.js";
5
- import { modelEndpoint } from "$lib/server/modelEndpoint.js";
6
- import type { Message } from "$lib/types/Message.js";
7
- import { concatUint8Arrays } from "$lib/utils/concatUint8Arrays.js";
8
- import { streamToAsyncIterable } from "$lib/utils/streamToAsyncIterable";
9
- import { trimPrefix } from "$lib/utils/trimPrefix.js";
10
- import { trimSuffix } from "$lib/utils/trimSuffix.js";
11
- import type { TextGenerationStreamOutput } from "@huggingface/inference";
12
- import { error } from "@sveltejs/kit";
13
- import { ObjectId } from "mongodb";
14
- import { z } from "zod";
15
-
16
- export async function POST({ request, fetch, locals, params }) {
17
- // todo: add validation on params.id
18
- const convId = new ObjectId(params.id);
19
- const date = new Date();
20
-
21
- const conv = await collections.conversations.findOne({
22
- _id: convId,
23
- sessionId: locals.sessionId,
24
- });
25
-
26
- if (!conv) {
27
- throw error(404, "Conversation not found");
28
- }
29
-
30
- const json = await request.json();
31
- const {
32
- inputs: newPrompt,
33
- options: { id: messageId, is_retry },
34
- } = z
35
- .object({
36
- inputs: z.string().trim().min(1),
37
- options: z.object({
38
- id: z.optional(z.string().uuid()),
39
- is_retry: z.optional(z.boolean()),
40
- }),
41
- })
42
- .parse(json);
43
-
44
- const messages = (() => {
45
- if (is_retry && messageId) {
46
- let retryMessageIdx = conv.messages.findIndex((message) => message.id === messageId);
47
- if (retryMessageIdx === -1) {
48
- retryMessageIdx = conv.messages.length;
49
- }
50
- return [
51
- ...conv.messages.slice(0, retryMessageIdx),
52
- { content: newPrompt, from: "user", id: messageId as Message["id"] },
53
- ];
54
- }
55
- return [
56
- ...conv.messages,
57
- { content: newPrompt, from: "user", id: (messageId as Message["id"]) || crypto.randomUUID() },
58
- ];
59
- })() satisfies Message[];
60
-
61
- // Todo: on-the-fly migration, remove later
62
- for (const message of messages) {
63
- if (!message.id) {
64
- message.id = crypto.randomUUID();
65
- }
66
- }
67
- const prompt = buildPrompt(messages);
68
-
69
- const randomEndpoint = modelEndpoint();
70
-
71
- const abortController = new AbortController();
72
-
73
- const resp = await fetch(randomEndpoint.endpoint, {
74
- headers: {
75
- "Content-Type": request.headers.get("Content-Type") ?? "application/json",
76
- Authorization: randomEndpoint.authorization,
77
- },
78
- method: "POST",
79
- body: JSON.stringify({
80
- ...json,
81
- inputs: prompt,
82
- }),
83
- signal: abortController.signal,
84
- });
85
-
86
- const [stream1, stream2] = resp.body!.tee();
87
-
88
- async function saveMessage() {
89
- let generated_text = await parseGeneratedText(stream2, convId, date, abortController);
90
-
91
- // We could also check if PUBLIC_ASSISTANT_MESSAGE_TOKEN is present and use it to slice the text
92
- if (generated_text.startsWith(prompt)) {
93
- generated_text = generated_text.slice(prompt.length);
94
- }
95
-
96
- generated_text = trimSuffix(trimPrefix(generated_text, "<|startoftext|>"), PUBLIC_SEP_TOKEN);
97
-
98
- messages.push({ from: "assistant", content: generated_text, id: crypto.randomUUID() });
99
-
100
- await collections.conversations.updateOne(
101
- {
102
- _id: convId,
103
- },
104
- {
105
- $set: {
106
- messages,
107
- updatedAt: new Date(),
108
- },
109
- }
110
- );
111
- }
112
-
113
- saveMessage().catch(console.error);
114
-
115
- // Todo: maybe we should wait for the message to be saved before ending the response - in case of errors
116
- return new Response(stream1, {
117
- headers: Object.fromEntries(resp.headers.entries()),
118
- status: resp.status,
119
- statusText: resp.statusText,
120
- });
121
- }
122
-
123
- export async function DELETE({ locals, params }) {
124
- const convId = new ObjectId(params.id);
125
-
126
- const conv = await collections.conversations.findOne({
127
- _id: convId,
128
- sessionId: locals.sessionId,
129
- });
130
-
131
- if (!conv) {
132
- throw error(404, "Conversation not found");
133
- }
134
-
135
- await collections.conversations.deleteOne({ _id: conv._id });
136
-
137
- return new Response();
138
- }
139
-
140
- async function parseGeneratedText(
141
- stream: ReadableStream,
142
- conversationId: ObjectId,
143
- promptedAt: Date,
144
- abortController: AbortController
145
- ): Promise<string> {
146
- const inputs: Uint8Array[] = [];
147
- for await (const input of streamToAsyncIterable(stream)) {
148
- inputs.push(input);
149
-
150
- const date = abortedGenerations.get(conversationId.toString());
151
-
152
- if (date && date > promptedAt) {
153
- abortController.abort("Cancelled by user");
154
- const completeInput = concatUint8Arrays(inputs);
155
-
156
- const lines = new TextDecoder()
157
- .decode(completeInput)
158
- .split("\n")
159
- .filter((line) => line.startsWith("data:"));
160
-
161
- const tokens = lines.map((line) => {
162
- try {
163
- const json: TextGenerationStreamOutput = JSON.parse(line.slice("data:".length));
164
- return json.token.text;
165
- } catch {
166
- return "";
167
- }
168
- });
169
- return tokens.join("");
170
- }
171
- }
172
-
173
- // Merge inputs into a single Uint8Array
174
- const completeInput = concatUint8Arrays(inputs);
175
-
176
- // Get last line starting with "data:" and parse it as JSON to get the generated text
177
- const message = new TextDecoder().decode(completeInput);
178
-
179
- let lastIndex = message.lastIndexOf("\ndata:");
180
- if (lastIndex === -1) {
181
- lastIndex = message.indexOf("data");
182
- }
183
-
184
- if (lastIndex === -1) {
185
- console.error("Could not parse in last message");
186
- }
187
-
188
- let lastMessage = message.slice(lastIndex).trim().slice("data:".length);
189
- if (lastMessage.includes("\n")) {
190
- lastMessage = lastMessage.slice(0, lastMessage.indexOf("\n"));
191
- }
192
-
193
- const lastMessageJSON = JSON.parse(lastMessage);
194
-
195
- if (lastMessageJSON.error) {
196
- throw new Error(lastMessageJSON.error);
197
- }
198
-
199
- const res = lastMessageJSON.generated_text;
200
-
201
- if (typeof res !== "string") {
202
- throw new Error("Could not parse generated text");
203
- }
204
-
205
- return res;
206
- }
207
-
208
- export async function PATCH({ request, locals, params }) {
209
- const { title } = z
210
- .object({ title: z.string().trim().min(1).max(100) })
211
- .parse(await request.json());
212
-
213
- const convId = new ObjectId(params.id);
214
-
215
- const conv = await collections.conversations.findOne({
216
- _id: convId,
217
- sessionId: locals.sessionId,
218
- });
219
-
220
- if (!conv) {
221
- throw error(404, "Conversation not found");
222
- }
223
-
224
- await collections.conversations.updateOne(
225
- {
226
- _id: convId,
227
- },
228
- {
229
- $set: {
230
- title,
231
- },
232
- }
233
- );
234
-
235
- return new Response();
236
- }
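For reference, a minimal client-side sketch of exercising the PATCH and DELETE handlers above. The base URL, route path, conversation id, and session-cookie name are illustrative assumptions, not values defined in this file::

    import requests  # hypothetical standalone client script

    BASE = "http://localhost:5173"          # assumed dev-server address
    conv = "64a0c0ffee0123456789abcd"       # placeholder conversation ObjectId
    cookies = {"sessionId": "..."}          # cookie name is an assumption

    # PATCH: rename the conversation (the handler validates 1-100 chars via zod)
    r = requests.patch(f"{BASE}/conversation/{conv}",
                       json={"title": "My renamed conversation"},
                       cookies=cookies)
    r.raise_for_status()

    # DELETE: remove it; the handler returns 404 if the session does not own it
    requests.delete(f"{BASE}/conversation/{conv}", cookies=cookies)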
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/helpers.py DELETED
@@ -1,1088 +0,0 @@
1
- # helpers.py
2
- import html.entities
3
- import re
4
- import typing
5
-
6
- from . import __diag__
7
- from .core import *
8
- from .util import _bslash, _flatten, _escape_regex_range_chars
9
-
10
-
11
- #
12
- # global helpers
13
- #
14
- def delimited_list(
15
- expr: Union[str, ParserElement],
16
- delim: Union[str, ParserElement] = ",",
17
- combine: bool = False,
18
- min: typing.Optional[int] = None,
19
- max: typing.Optional[int] = None,
20
- *,
21
- allow_trailing_delim: bool = False,
22
- ) -> ParserElement:
23
- """Helper to define a delimited list of expressions - the delimiter
24
- defaults to ','. By default, the list elements and delimiters can
25
- have intervening whitespace, and comments, but this can be
26
- overridden by passing ``combine=True`` in the constructor. If
27
- ``combine`` is set to ``True``, the matching tokens are
28
- returned as a single token string, with the delimiters included;
29
- otherwise, the matching tokens are returned as a list of tokens,
30
- with the delimiters suppressed.
31
-
32
- If ``allow_trailing_delim`` is set to True, then the list may end with
33
- a delimiter.
34
-
35
- Example::
36
-
37
- delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
38
- delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
39
- """
40
- if isinstance(expr, str_type):
41
- expr = ParserElement._literalStringClass(expr)
42
-
43
- dlName = "{expr} [{delim} {expr}]...{end}".format(
44
- expr=str(expr.copy().streamline()),
45
- delim=str(delim),
46
- end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
47
- )
48
-
49
- if not combine:
50
- delim = Suppress(delim)
51
-
52
- if min is not None:
53
- if min < 1:
54
- raise ValueError("min must be greater than 0")
55
- min -= 1
56
- if max is not None:
57
- if min is not None and max <= min:
58
- raise ValueError("max must be greater than, or equal to min")
59
- max -= 1
60
- delimited_list_expr = expr + (delim + expr)[min, max]
61
-
62
- if allow_trailing_delim:
63
- delimited_list_expr += Opt(delim)
64
-
65
- if combine:
66
- return Combine(delimited_list_expr).set_name(dlName)
67
- else:
68
- return delimited_list_expr.set_name(dlName)
69
-
70
-
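The ``min``, ``max``, and ``allow_trailing_delim`` parameters are not covered by the docstring examples above; a minimal sketch, assuming the standalone ``pyparsing`` package (3.0+) rather than this vendored copy::

    from pyparsing import Word, alphas, delimited_list

    # require at least two items and tolerate a trailing comma
    items = delimited_list(Word(alphas), min=2, allow_trailing_delim=True)
    print(items.parse_string("aa, bb, cc,").as_list())  # -> ['aa', 'bb', 'cc']
    print(items.matches("aa"))                          # -> False (min=2 not met)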
71
- def counted_array(
72
- expr: ParserElement,
73
- int_expr: typing.Optional[ParserElement] = None,
74
- *,
75
- intExpr: typing.Optional[ParserElement] = None,
76
- ) -> ParserElement:
77
- """Helper to define a counted list of expressions.
78
-
79
- This helper defines a pattern of the form::
80
-
81
- integer expr expr expr...
82
-
83
- where the leading integer tells how many expr expressions follow.
84
- The matched tokens returns the array of expr tokens as a list - the
85
- leading count token is suppressed.
86
-
87
- If ``int_expr`` is specified, it should be a pyparsing expression
88
- that produces an integer value.
89
-
90
- Example::
91
-
92
- counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
93
-
94
- # in this parser, the leading integer value is given in binary,
95
- # '10' indicating that 2 values are in the array
96
- binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
97
- counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
98
-
99
- # if other fields must be parsed after the count but before the
100
- # list items, give the fields results names and they will
101
- # be preserved in the returned ParseResults:
102
- count_with_metadata = integer + Word(alphas)("type")
103
- typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
104
- result = typed_array.parse_string("3 bool True True False")
105
- print(result.dump())
106
-
107
- # prints
108
- # ['True', 'True', 'False']
109
- # - items: ['True', 'True', 'False']
110
- # - type: 'bool'
111
- """
112
- intExpr = intExpr or int_expr
113
- array_expr = Forward()
114
-
115
- def count_field_parse_action(s, l, t):
116
- nonlocal array_expr
117
- n = t[0]
118
- array_expr <<= (expr * n) if n else Empty()
119
- # clear list contents, but keep any named results
120
- del t[:]
121
-
122
- if intExpr is None:
123
- intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
124
- else:
125
- intExpr = intExpr.copy()
126
- intExpr.set_name("arrayLen")
127
- intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
128
- return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
129
-
130
-
131
- def match_previous_literal(expr: ParserElement) -> ParserElement:
132
- """Helper to define an expression that is indirectly defined from
133
- the tokens matched in a previous expression, that is, it looks for
134
- a 'repeat' of a previous expression. For example::
135
-
136
- first = Word(nums)
137
- second = match_previous_literal(first)
138
- match_expr = first + ":" + second
139
-
140
- will match ``"1:1"``, but not ``"1:2"``. Because this
141
- matches a previous literal, will also match the leading
142
- ``"1:1"`` in ``"1:10"``. If this is not desired, use
143
- :class:`match_previous_expr`. Do *not* use with packrat parsing
144
- enabled.
145
- """
146
- rep = Forward()
147
-
148
- def copy_token_to_repeater(s, l, t):
149
- if t:
150
- if len(t) == 1:
151
- rep << t[0]
152
- else:
153
- # flatten t tokens
154
- tflat = _flatten(t.as_list())
155
- rep << And(Literal(tt) for tt in tflat)
156
- else:
157
- rep << Empty()
158
-
159
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
160
- rep.set_name("(prev) " + str(expr))
161
- return rep
162
-
163
-
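A minimal sketch of the repeat-matching behaviour described above, under the same assumption of importing ``pyparsing`` directly::

    from pyparsing import Word, nums, match_previous_literal

    first = Word(nums)
    second = match_previous_literal(first)
    pair = first + ":" + second
    print(pair.parse_string("12:12").as_list())  # -> ['12', ':', '12']
    print(pair.matches("12:34"))                 # -> False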
164
- def match_previous_expr(expr: ParserElement) -> ParserElement:
165
- """Helper to define an expression that is indirectly defined from
166
- the tokens matched in a previous expression, that is, it looks for
167
- a 'repeat' of a previous expression. For example::
168
-
169
- first = Word(nums)
170
- second = match_previous_expr(first)
171
- match_expr = first + ":" + second
172
-
173
- will match ``"1:1"``, but not ``"1:2"``. Because this
174
- matches by expressions, will *not* match the leading ``"1:1"``
175
- in ``"1:10"``; the expressions are evaluated first, and then
176
- compared, so ``"1"`` is compared with ``"10"``. Do *not* use
177
- with packrat parsing enabled.
178
- """
179
- rep = Forward()
180
- e2 = expr.copy()
181
- rep <<= e2
182
-
183
- def copy_token_to_repeater(s, l, t):
184
- matchTokens = _flatten(t.as_list())
185
-
186
- def must_match_these_tokens(s, l, t):
187
- theseTokens = _flatten(t.as_list())
188
- if theseTokens != matchTokens:
189
- raise ParseException(
190
- s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
191
- )
192
-
193
- rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
194
-
195
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
196
- rep.set_name("(prev) " + str(expr))
197
- return rep
198
-
199
-
200
- def one_of(
201
- strs: Union[typing.Iterable[str], str],
202
- caseless: bool = False,
203
- use_regex: bool = True,
204
- as_keyword: bool = False,
205
- *,
206
- useRegex: bool = True,
207
- asKeyword: bool = False,
208
- ) -> ParserElement:
209
- """Helper to quickly define a set of alternative :class:`Literal` s,
210
- and makes sure to do longest-first testing when there is a conflict,
211
- regardless of the input order, but returns
212
- a :class:`MatchFirst` for best performance.
213
-
214
- Parameters:
215
-
216
- - ``strs`` - a string of space-delimited literals, or a collection of
217
- string literals
218
- - ``caseless`` - treat all literals as caseless - (default= ``False``)
219
- - ``use_regex`` - as an optimization, will
220
- generate a :class:`Regex` object; otherwise, will generate
221
- a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
222
- creating a :class:`Regex` raises an exception) - (default= ``True``)
223
- - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
224
- generated expressions - (default= ``False``)
225
- - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
226
- but will be removed in a future release
227
-
228
- Example::
229
-
230
- comp_oper = one_of("< = > <= >= !=")
231
- var = Word(alphas)
232
- number = Word(nums)
233
- term = var | number
234
- comparison_expr = term + comp_oper + term
235
- print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
236
-
237
- prints::
238
-
239
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
240
- """
241
- asKeyword = asKeyword or as_keyword
242
- useRegex = useRegex and use_regex
243
-
244
- if (
245
- isinstance(caseless, str_type)
246
- and __diag__.warn_on_multiple_string_args_to_oneof
247
- ):
248
- warnings.warn(
249
- "More than one string argument passed to one_of, pass"
250
- " choices as a list or space-delimited string",
251
- stacklevel=2,
252
- )
253
-
254
- if caseless:
255
- isequal = lambda a, b: a.upper() == b.upper()
256
- masks = lambda a, b: b.upper().startswith(a.upper())
257
- parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
258
- else:
259
- isequal = lambda a, b: a == b
260
- masks = lambda a, b: b.startswith(a)
261
- parseElementClass = Keyword if asKeyword else Literal
262
-
263
- symbols: List[str] = []
264
- if isinstance(strs, str_type):
265
- symbols = strs.split()
266
- elif isinstance(strs, Iterable):
267
- symbols = list(strs)
268
- else:
269
- raise TypeError("Invalid argument to one_of, expected string or iterable")
270
- if not symbols:
271
- return NoMatch()
272
-
273
- # reorder given symbols to take care to avoid masking longer choices with shorter ones
274
- # (but only if the given symbols are not just single characters)
275
- if any(len(sym) > 1 for sym in symbols):
276
- i = 0
277
- while i < len(symbols) - 1:
278
- cur = symbols[i]
279
- for j, other in enumerate(symbols[i + 1 :]):
280
- if isequal(other, cur):
281
- del symbols[i + j + 1]
282
- break
283
- elif masks(cur, other):
284
- del symbols[i + j + 1]
285
- symbols.insert(i, other)
286
- break
287
- else:
288
- i += 1
289
-
290
- if useRegex:
291
- re_flags: int = re.IGNORECASE if caseless else 0
292
-
293
- try:
294
- if all(len(sym) == 1 for sym in symbols):
295
- # symbols are just single characters, create range regex pattern
296
- patt = "[{}]".format(
297
- "".join(_escape_regex_range_chars(sym) for sym in symbols)
298
- )
299
- else:
300
- patt = "|".join(re.escape(sym) for sym in symbols)
301
-
302
- # wrap with \b word break markers if defining as keywords
303
- if asKeyword:
304
- patt = r"\b(?:{})\b".format(patt)
305
-
306
- ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
307
-
308
- if caseless:
309
- # add parse action to return symbols as specified, not in random
310
- # casing as found in input string
311
- symbol_map = {sym.lower(): sym for sym in symbols}
312
- ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
313
-
314
- return ret
315
-
316
- except re.error:
317
- warnings.warn(
318
- "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
319
- )
320
-
321
- # last resort, just use MatchFirst
322
- return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
323
- " | ".join(symbols)
324
- )
325
-
326
-
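A short sketch of the longest-first reordering described above, again assuming the standalone ``pyparsing`` package::

    from pyparsing import one_of

    op = one_of("< > <= >= = !=")
    # '<=' still matches even though '<' appears first in the input list:
    # one_of reorders the alternatives longest-first to avoid masking
    print(op.parse_string("<=").as_list())  # -> ['<=']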
327
- def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
328
- """Helper to easily and clearly define a dictionary by specifying
329
- the respective patterns for the key and value. Takes care of
330
- defining the :class:`Dict`, :class:`ZeroOrMore`, and
331
- :class:`Group` tokens in the proper order. The key pattern
332
- can include delimiting markers or punctuation, as long as they are
333
- suppressed, thereby leaving the significant key text. The value
334
- pattern can include named results, so that the :class:`Dict` results
335
- can include named token fields.
336
-
337
- Example::
338
-
339
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
340
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
341
- print(attr_expr[1, ...].parse_string(text).dump())
342
-
343
- attr_label = label
344
- attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
345
-
346
- # similar to Dict, but simpler call format
347
- result = dict_of(attr_label, attr_value).parse_string(text)
348
- print(result.dump())
349
- print(result['shape'])
350
- print(result.shape) # object attribute access works too
351
- print(result.as_dict())
352
-
353
- prints::
354
-
355
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
356
- - color: 'light blue'
357
- - posn: 'upper left'
358
- - shape: 'SQUARE'
359
- - texture: 'burlap'
360
- SQUARE
361
- SQUARE
362
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
363
- """
364
- return Dict(OneOrMore(Group(key + value)))
365
-
366
-
367
- def original_text_for(
368
- expr: ParserElement, as_string: bool = True, *, asString: bool = True
369
- ) -> ParserElement:
370
- """Helper to return the original, untokenized text for a given
371
- expression. Useful to restore the parsed fields of an HTML start
372
- tag into the raw tag text itself, or to revert separate tokens with
373
- intervening whitespace back to the original matching input text. By
374
- default, returns a string containing the original parsed text.
375
-
376
- If the optional ``as_string`` argument is passed as
377
- ``False``, then the return value is
378
- a :class:`ParseResults` containing any results names that
379
- were originally matched, and a single token containing the original
380
- matched text from the input string. So if the expression passed to
381
- :class:`original_text_for` contains expressions with defined
382
- results names, you must set ``as_string`` to ``False`` if you
383
- want to preserve those results name values.
384
-
385
- The ``asString`` pre-PEP8 argument is retained for compatibility,
386
- but will be removed in a future release.
387
-
388
- Example::
389
-
390
- src = "this is test <b> bold <i>text</i> </b> normal text "
391
- for tag in ("b", "i"):
392
- opener, closer = make_html_tags(tag)
393
- patt = original_text_for(opener + SkipTo(closer) + closer)
394
- print(patt.search_string(src)[0])
395
-
396
- prints::
397
-
398
- ['<b> bold <i>text</i> </b>']
399
- ['<i>text</i>']
400
- """
401
- asString = asString and as_string
402
-
403
- locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
404
- endlocMarker = locMarker.copy()
405
- endlocMarker.callPreparse = False
406
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
407
- if asString:
408
- extractText = lambda s, l, t: s[t._original_start : t._original_end]
409
- else:
410
-
411
- def extractText(s, l, t):
412
- t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
413
-
414
- matchExpr.set_parse_action(extractText)
415
- matchExpr.ignoreExprs = expr.ignoreExprs
416
- matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
417
- return matchExpr
418
-
419
-
420
- def ungroup(expr: ParserElement) -> ParserElement:
421
- """Helper to undo pyparsing's default grouping of And expressions,
422
- even if all but one are non-empty.
423
- """
424
- return TokenConverter(expr).add_parse_action(lambda t: t[0])
425
-
426
-
427
- def locatedExpr(expr: ParserElement) -> ParserElement:
428
- """
429
- (DEPRECATED - future code should use the Located class)
430
- Helper to decorate a returned token with its starting and ending
431
- locations in the input string.
432
-
433
- This helper adds the following results names:
434
-
435
- - ``locn_start`` - location where matched expression begins
436
- - ``locn_end`` - location where matched expression ends
437
- - ``value`` - the actual parsed results
438
-
439
- Be careful if the input text contains ``<TAB>`` characters, you
440
- may want to call :class:`ParserElement.parseWithTabs`
441
-
442
- Example::
443
-
444
- wd = Word(alphas)
445
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
446
- print(match)
447
-
448
- prints::
449
-
450
- [[0, 'ljsdf', 5]]
451
- [[8, 'lksdjjf', 15]]
452
- [[18, 'lkkjj', 23]]
453
- """
454
- locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
455
- return Group(
456
- locator("locn_start")
457
- + expr("value")
458
- + locator.copy().leaveWhitespace()("locn_end")
459
- )
460
-
461
-
462
- def nested_expr(
463
- opener: Union[str, ParserElement] = "(",
464
- closer: Union[str, ParserElement] = ")",
465
- content: typing.Optional[ParserElement] = None,
466
- ignore_expr: ParserElement = quoted_string(),
467
- *,
468
- ignoreExpr: ParserElement = quoted_string(),
469
- ) -> ParserElement:
470
- """Helper method for defining nested lists enclosed in opening and
471
- closing delimiters (``"("`` and ``")"`` are the default).
472
-
473
- Parameters:
474
- - ``opener`` - opening character for a nested list
475
- (default= ``"("``); can also be a pyparsing expression
476
- - ``closer`` - closing character for a nested list
477
- (default= ``")"``); can also be a pyparsing expression
478
- - ``content`` - expression for items within the nested lists
479
- (default= ``None``)
480
- - ``ignore_expr`` - expression for ignoring opening and closing delimiters
481
- (default= :class:`quoted_string`)
482
- - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
483
- but will be removed in a future release
484
-
485
- If an expression is not provided for the content argument, the
486
- nested expression will capture all whitespace-delimited content
487
- between delimiters as a list of separate values.
488
-
489
- Use the ``ignore_expr`` argument to define expressions that may
490
- contain opening or closing characters that should not be treated as
491
- opening or closing characters for nesting, such as quoted_string or
492
- a comment expression. Specify multiple expressions using an
493
- :class:`Or` or :class:`MatchFirst`. The default is
494
- :class:`quoted_string`, but if no expressions are to be ignored, then
495
- pass ``None`` for this argument.
496
-
497
- Example::
498
-
499
- data_type = one_of("void int short long char float double")
500
- decl_data_type = Combine(data_type + Opt(Word('*')))
501
- ident = Word(alphas+'_', alphanums+'_')
502
- number = pyparsing_common.number
503
- arg = Group(decl_data_type + ident)
504
- LPAR, RPAR = map(Suppress, "()")
505
-
506
- code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
507
-
508
- c_function = (decl_data_type("type")
509
- + ident("name")
510
- + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
511
- + code_body("body"))
512
- c_function.ignore(c_style_comment)
513
-
514
- source_code = '''
515
- int is_odd(int x) {
516
- return (x%2);
517
- }
518
-
519
- int dec_to_hex(char hchar) {
520
- if (hchar >= '0' && hchar <= '9') {
521
- return (ord(hchar)-ord('0'));
522
- } else {
523
- return (10+ord(hchar)-ord('A'));
524
- }
525
- }
526
- '''
527
- for func in c_function.search_string(source_code):
528
- print("%(name)s (%(type)s) args: %(args)s" % func)
529
-
530
-
531
- prints::
532
-
533
- is_odd (int) args: [['int', 'x']]
534
- dec_to_hex (int) args: [['char', 'hchar']]
535
- """
536
- if ignoreExpr != ignore_expr:
537
- ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
538
- if opener == closer:
539
- raise ValueError("opening and closing strings cannot be the same")
540
- if content is None:
541
- if isinstance(opener, str_type) and isinstance(closer, str_type):
542
- if len(opener) == 1 and len(closer) == 1:
543
- if ignoreExpr is not None:
544
- content = Combine(
545
- OneOrMore(
546
- ~ignoreExpr
547
- + CharsNotIn(
548
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
549
- exact=1,
550
- )
551
- )
552
- ).set_parse_action(lambda t: t[0].strip())
553
- else:
554
- content = empty.copy() + CharsNotIn(
555
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS
556
- ).set_parse_action(lambda t: t[0].strip())
557
- else:
558
- if ignoreExpr is not None:
559
- content = Combine(
560
- OneOrMore(
561
- ~ignoreExpr
562
- + ~Literal(opener)
563
- + ~Literal(closer)
564
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
565
- )
566
- ).set_parse_action(lambda t: t[0].strip())
567
- else:
568
- content = Combine(
569
- OneOrMore(
570
- ~Literal(opener)
571
- + ~Literal(closer)
572
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
573
- )
574
- ).set_parse_action(lambda t: t[0].strip())
575
- else:
576
- raise ValueError(
577
- "opening and closing arguments must be strings if no content expression is given"
578
- )
579
- ret = Forward()
580
- if ignoreExpr is not None:
581
- ret <<= Group(
582
- Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
583
- )
584
- else:
585
- ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
586
- ret.set_name("nested %s%s expression" % (opener, closer))
587
- return ret
588
-
589
-
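A minimal sketch of the default behaviour (whitespace-delimited content captured as nested lists), assuming the standalone ``pyparsing`` package::

    from pyparsing import nested_expr

    print(nested_expr().parse_string("(a (b c) d)").as_list())
    # -> [['a', ['b', 'c'], 'd']]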
590
- def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
591
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
592
- if isinstance(tagStr, str_type):
593
- resname = tagStr
594
- tagStr = Keyword(tagStr, caseless=not xml)
595
- else:
596
- resname = tagStr.name
597
-
598
- tagAttrName = Word(alphas, alphanums + "_-:")
599
- if xml:
600
- tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
601
- openTag = (
602
- suppress_LT
603
- + tagStr("tag")
604
- + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
605
- + Opt("/", default=[False])("empty").set_parse_action(
606
- lambda s, l, t: t[0] == "/"
607
- )
608
- + suppress_GT
609
- )
610
- else:
611
- tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
612
- printables, exclude_chars=">"
613
- )
614
- openTag = (
615
- suppress_LT
616
- + tagStr("tag")
617
- + Dict(
618
- ZeroOrMore(
619
- Group(
620
- tagAttrName.set_parse_action(lambda t: t[0].lower())
621
- + Opt(Suppress("=") + tagAttrValue)
622
- )
623
- )
624
- )
625
- + Opt("/", default=[False])("empty").set_parse_action(
626
- lambda s, l, t: t[0] == "/"
627
- )
628
- + suppress_GT
629
- )
630
- closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
631
-
632
- openTag.set_name("<%s>" % resname)
633
- # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
634
- openTag.add_parse_action(
635
- lambda t: t.__setitem__(
636
- "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
637
- )
638
- )
639
- closeTag = closeTag(
640
- "end" + "".join(resname.replace(":", " ").title().split())
641
- ).set_name("</%s>" % resname)
642
- openTag.tag = resname
643
- closeTag.tag = resname
644
- openTag.tag_body = SkipTo(closeTag())
645
- return openTag, closeTag
646
-
647
-
648
- def make_html_tags(
649
- tag_str: Union[str, ParserElement]
650
- ) -> Tuple[ParserElement, ParserElement]:
651
- """Helper to construct opening and closing tag expressions for HTML,
652
- given a tag name. Matches tags in either upper or lower case,
653
- attributes with namespaces and with quoted or unquoted values.
654
-
655
- Example::
656
-
657
- text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
658
- # make_html_tags returns pyparsing expressions for the opening and
659
- # closing tags as a 2-tuple
660
- a, a_end = make_html_tags("A")
661
- link_expr = a + SkipTo(a_end)("link_text") + a_end
662
-
663
- for link in link_expr.search_string(text):
664
- # attributes in the <A> tag (like "href" shown here) are
665
- # also accessible as named results
666
- print(link.link_text, '->', link.href)
667
-
668
- prints::
669
-
670
- pyparsing -> https://github.com/pyparsing/pyparsing/wiki
671
- """
672
- return _makeTags(tag_str, False)
673
-
674
-
675
- def make_xml_tags(
676
- tag_str: Union[str, ParserElement]
677
- ) -> Tuple[ParserElement, ParserElement]:
678
- """Helper to construct opening and closing tag expressions for XML,
679
- given a tag name. Matches tags only in the given upper/lower case.
680
-
681
- Example: similar to :class:`make_html_tags`
682
- """
683
- return _makeTags(tag_str, True)
684
-
685
-
686
- any_open_tag: ParserElement
687
- any_close_tag: ParserElement
688
- any_open_tag, any_close_tag = make_html_tags(
689
- Word(alphas, alphanums + "_:").set_name("any tag")
690
- )
691
-
692
- _htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
693
- common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
694
- "common HTML entity"
695
- )
696
-
697
-
698
- def replace_html_entity(t):
699
- """Helper parser action to replace common HTML entities with their special characters"""
700
- return _htmlEntityMap.get(t.entity)
701
-
702
-
703
- class OpAssoc(Enum):
704
- LEFT = 1
705
- RIGHT = 2
706
-
707
-
708
- InfixNotationOperatorArgType = Union[
709
- ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
710
- ]
711
- InfixNotationOperatorSpec = Union[
712
- Tuple[
713
- InfixNotationOperatorArgType,
714
- int,
715
- OpAssoc,
716
- typing.Optional[ParseAction],
717
- ],
718
- Tuple[
719
- InfixNotationOperatorArgType,
720
- int,
721
- OpAssoc,
722
- ],
723
- ]
724
-
725
-
726
- def infix_notation(
727
- base_expr: ParserElement,
728
- op_list: List[InfixNotationOperatorSpec],
729
- lpar: Union[str, ParserElement] = Suppress("("),
730
- rpar: Union[str, ParserElement] = Suppress(")"),
731
- ) -> ParserElement:
732
- """Helper method for constructing grammars of expressions made up of
733
- operators working in a precedence hierarchy. Operators may be unary
734
- or binary, left- or right-associative. Parse actions can also be
735
- attached to operator expressions. The generated parser will also
736
- recognize the use of parentheses to override operator precedences
737
- (see example below).
738
-
739
- Note: if you define a deep operator list, you may see performance
740
- issues when using infix_notation. See
741
- :class:`ParserElement.enable_packrat` for a mechanism to potentially
742
- improve your parser performance.
743
-
744
- Parameters:
745
- - ``base_expr`` - expression representing the most basic operand to
746
- be used in the expression
747
- - ``op_list`` - list of tuples, one for each operator precedence level
748
- in the expression grammar; each tuple is of the form ``(op_expr,
749
- num_operands, right_left_assoc, (optional)parse_action)``, where:
750
-
751
- - ``op_expr`` is the pyparsing expression for the operator; may also
752
- be a string, which will be converted to a Literal; if ``num_operands``
753
- is 3, ``op_expr`` is a tuple of two expressions, for the two
754
- operators separating the 3 terms
755
- - ``num_operands`` is the number of terms for this operator (must be 1,
756
- 2, or 3)
757
- - ``right_left_assoc`` is the indicator whether the operator is right
758
- or left associative, using the pyparsing-defined constants
759
- ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
760
- - ``parse_action`` is the parse action to be associated with
761
- expressions matching this operator expression (the parse action
762
- tuple member may be omitted); if the parse action is passed
763
- a tuple or list of functions, this is equivalent to calling
764
- ``set_parse_action(*fn)``
765
- (:class:`ParserElement.set_parse_action`)
766
- - ``lpar`` - expression for matching left-parentheses; if passed as a
767
- str, then will be parsed as Suppress(lpar). If lpar is passed as
768
- an expression (such as ``Literal('(')``), then it will be kept in
769
- the parsed results, and grouped with them. (default= ``Suppress('(')``)
770
- - ``rpar`` - expression for matching right-parentheses; if passed as a
771
- str, then will be parsed as Suppress(rpar). If rpar is passed as
772
- an expression (such as ``Literal(')')``), then it will be kept in
773
- the parsed results, and grouped with them. (default= ``Suppress(')')``)
774
-
775
- Example::
776
-
777
- # simple example of four-function arithmetic with ints and
778
- # variable names
779
- integer = pyparsing_common.signed_integer
780
- varname = pyparsing_common.identifier
781
-
782
- arith_expr = infix_notation(integer | varname,
783
- [
784
- ('-', 1, OpAssoc.RIGHT),
785
- (one_of('* /'), 2, OpAssoc.LEFT),
786
- (one_of('+ -'), 2, OpAssoc.LEFT),
787
- ])
788
-
789
- arith_expr.run_tests('''
790
- 5+3*6
791
- (5+3)*6
792
- -2--11
793
- ''', full_dump=False)
794
-
795
- prints::
796
-
797
- 5+3*6
798
- [[5, '+', [3, '*', 6]]]
799
-
800
- (5+3)*6
801
- [[[5, '+', 3], '*', 6]]
802
-
803
- -2--11
804
- [[['-', 2], '-', ['-', 11]]]
805
- """
806
- # captive version of FollowedBy that does not do parse actions or capture results names
807
- class _FB(FollowedBy):
808
- def parseImpl(self, instring, loc, doActions=True):
809
- self.expr.try_parse(instring, loc)
810
- return loc, []
811
-
812
- _FB.__name__ = "FollowedBy>"
813
-
814
- ret = Forward()
815
- if isinstance(lpar, str):
816
- lpar = Suppress(lpar)
817
- if isinstance(rpar, str):
818
- rpar = Suppress(rpar)
819
-
820
- # if lpar and rpar are not suppressed, wrap in group
821
- if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
822
- lastExpr = base_expr | Group(lpar + ret + rpar)
823
- else:
824
- lastExpr = base_expr | (lpar + ret + rpar)
825
-
826
- for i, operDef in enumerate(op_list):
827
- opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
828
- if isinstance(opExpr, str_type):
829
- opExpr = ParserElement._literalStringClass(opExpr)
830
- if arity == 3:
831
- if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
832
- raise ValueError(
833
- "if numterms=3, opExpr must be a tuple or list of two expressions"
834
- )
835
- opExpr1, opExpr2 = opExpr
836
- term_name = "{}{} term".format(opExpr1, opExpr2)
837
- else:
838
- term_name = "{} term".format(opExpr)
839
-
840
- if not 1 <= arity <= 3:
841
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
842
-
843
- if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
844
- raise ValueError("operator must indicate right or left associativity")
845
-
846
- thisExpr: Forward = Forward().set_name(term_name)
847
- if rightLeftAssoc is OpAssoc.LEFT:
848
- if arity == 1:
849
- matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
850
- elif arity == 2:
851
- if opExpr is not None:
852
- matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
853
- lastExpr + (opExpr + lastExpr)[1, ...]
854
- )
855
- else:
856
- matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
857
- elif arity == 3:
858
- matchExpr = _FB(
859
- lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
860
- ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
861
- elif rightLeftAssoc is OpAssoc.RIGHT:
862
- if arity == 1:
863
- # try to avoid LR with this extra test
864
- if not isinstance(opExpr, Opt):
865
- opExpr = Opt(opExpr)
866
- matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
867
- elif arity == 2:
868
- if opExpr is not None:
869
- matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
870
- lastExpr + (opExpr + thisExpr)[1, ...]
871
- )
872
- else:
873
- matchExpr = _FB(lastExpr + thisExpr) + Group(
874
- lastExpr + thisExpr[1, ...]
875
- )
876
- elif arity == 3:
877
- matchExpr = _FB(
878
- lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
879
- ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
880
- if pa:
881
- if isinstance(pa, (tuple, list)):
882
- matchExpr.set_parse_action(*pa)
883
- else:
884
- matchExpr.set_parse_action(pa)
885
- thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
886
- lastExpr = thisExpr
887
- ret <<= lastExpr
888
- return ret
889
-
890
-
891
- def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
892
- """
893
- (DEPRECATED - use IndentedBlock class instead)
894
- Helper method for defining space-delimited indentation blocks,
895
- such as those used to define block statements in Python source code.
896
-
897
- Parameters:
898
-
899
- - ``blockStatementExpr`` - expression defining syntax of statement that
900
- is repeated within the indented block
901
- - ``indentStack`` - list created by caller to manage indentation stack
902
- (multiple ``statementWithIndentedBlock`` expressions within a single
903
- grammar should share a common ``indentStack``)
904
- - ``indent`` - boolean indicating whether block must be indented beyond
905
- the current level; set to ``False`` for block of left-most statements
906
- (default= ``True``)
907
-
908
- A valid block must contain at least one ``blockStatement``.
909
-
910
- (Note that indentedBlock uses internal parse actions which make it
911
- incompatible with packrat parsing.)
912
-
913
- Example::
914
-
915
- data = '''
916
- def A(z):
917
- A1
918
- B = 100
919
- G = A2
920
- A2
921
- A3
922
- B
923
- def BB(a,b,c):
924
- BB1
925
- def BBA():
926
- bba1
927
- bba2
928
- bba3
929
- C
930
- D
931
- def spam(x,y):
932
- def eggs(z):
933
- pass
934
- '''
935
-
936
-
937
- indentStack = [1]
938
- stmt = Forward()
939
-
940
- identifier = Word(alphas, alphanums)
941
- funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
942
- func_body = indentedBlock(stmt, indentStack)
943
- funcDef = Group(funcDecl + func_body)
944
-
945
- rvalue = Forward()
946
- funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
947
- rvalue << (funcCall | identifier | Word(nums))
948
- assignment = Group(identifier + "=" + rvalue)
949
- stmt << (funcDef | assignment | identifier)
950
-
951
- module_body = stmt[1, ...]
952
-
953
- parseTree = module_body.parseString(data)
954
- parseTree.pprint()
955
-
956
- prints::
957
-
958
- [['def',
959
- 'A',
960
- ['(', 'z', ')'],
961
- ':',
962
- [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
963
- 'B',
964
- ['def',
965
- 'BB',
966
- ['(', 'a', 'b', 'c', ')'],
967
- ':',
968
- [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
969
- 'C',
970
- 'D',
971
- ['def',
972
- 'spam',
973
- ['(', 'x', 'y', ')'],
974
- ':',
975
- [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
976
- """
977
- backup_stacks.append(indentStack[:])
978
-
979
- def reset_stack():
980
- indentStack[:] = backup_stacks[-1]
981
-
982
- def checkPeerIndent(s, l, t):
983
- if l >= len(s):
984
- return
985
- curCol = col(l, s)
986
- if curCol != indentStack[-1]:
987
- if curCol > indentStack[-1]:
988
- raise ParseException(s, l, "illegal nesting")
989
- raise ParseException(s, l, "not a peer entry")
990
-
991
- def checkSubIndent(s, l, t):
992
- curCol = col(l, s)
993
- if curCol > indentStack[-1]:
994
- indentStack.append(curCol)
995
- else:
996
- raise ParseException(s, l, "not a subentry")
997
-
998
- def checkUnindent(s, l, t):
999
- if l >= len(s):
1000
- return
1001
- curCol = col(l, s)
1002
- if not (indentStack and curCol in indentStack):
1003
- raise ParseException(s, l, "not an unindent")
1004
- if curCol < indentStack[-1]:
1005
- indentStack.pop()
1006
-
1007
- NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
1008
- INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
1009
- PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
1010
- UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
1011
- if indent:
1012
- smExpr = Group(
1013
- Opt(NL)
1014
- + INDENT
1015
- + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
1016
- + UNDENT
1017
- )
1018
- else:
1019
- smExpr = Group(
1020
- Opt(NL)
1021
- + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
1022
- + Opt(UNDENT)
1023
- )
1024
-
1025
- # add a parse action to remove backup_stack from list of backups
1026
- smExpr.add_parse_action(
1027
- lambda: backup_stacks.pop(-1) and None if backup_stacks else None
1028
- )
1029
- smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
1030
- blockStatementExpr.ignore(_bslash + LineEnd())
1031
- return smExpr.set_name("indented block")
1032
-
1033
-
1034
- # it's easy to get these comment structures wrong - they're very common, so may as well make them available
1035
- c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
1036
- "C style comment"
1037
- )
1038
- "Comment of the form ``/* ... */``"
1039
-
1040
- html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
1041
- "Comment of the form ``<!-- ... -->``"
1042
-
1043
- rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
1044
- dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
1045
- "Comment of the form ``// ... (to end of line)``"
1046
-
1047
- cpp_style_comment = Combine(
1048
- Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
1049
- ).set_name("C++ style comment")
1050
- "Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
1051
-
1052
- java_style_comment = cpp_style_comment
1053
- "Same as :class:`cpp_style_comment`"
1054
-
1055
- python_style_comment = Regex(r"#.*").set_name("Python style comment")
1056
- "Comment of the form ``# ... (to end of line)``"
1057
-
1058
-
1059
- # build list of built-in expressions, for future reference if a global default value
1060
- # gets updated
1061
- _builtin_exprs: List[ParserElement] = [
1062
- v for v in vars().values() if isinstance(v, ParserElement)
1063
- ]
1064
-
1065
-
1066
- # pre-PEP8 compatible names
1067
- delimitedList = delimited_list
1068
- countedArray = counted_array
1069
- matchPreviousLiteral = match_previous_literal
1070
- matchPreviousExpr = match_previous_expr
1071
- oneOf = one_of
1072
- dictOf = dict_of
1073
- originalTextFor = original_text_for
1074
- nestedExpr = nested_expr
1075
- makeHTMLTags = make_html_tags
1076
- makeXMLTags = make_xml_tags
1077
- anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
1078
- commonHTMLEntity = common_html_entity
1079
- replaceHTMLEntity = replace_html_entity
1080
- opAssoc = OpAssoc
1081
- infixNotation = infix_notation
1082
- cStyleComment = c_style_comment
1083
- htmlComment = html_comment
1084
- restOfLine = rest_of_line
1085
- dblSlashComment = dbl_slash_comment
1086
- cppStyleComment = cpp_style_comment
1087
- javaStyleComment = java_style_comment
1088
- pythonStyleComment = python_style_comment
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/webencodings/mklabels.py DELETED
@@ -1,59 +0,0 @@
1
- """
2
-
3
- webencodings.mklabels
4
- ~~~~~~~~~~~~~~~~~~~~~
5
-
6
- Regenerate the webencodings.labels module.
7
-
8
- :copyright: Copyright 2012 by Simon Sapin
9
- :license: BSD, see LICENSE for details.
10
-
11
- """
12
-
13
- import json
14
- try:
15
- from urllib import urlopen
16
- except ImportError:
17
- from urllib.request import urlopen
18
-
19
-
20
- def assert_lower(string):
21
- assert string == string.lower()
22
- return string
23
-
24
-
25
- def generate(url):
26
- parts = ['''\
27
- """
28
-
29
- webencodings.labels
30
- ~~~~~~~~~~~~~~~~~~~
31
-
32
- Map encoding labels to their name.
33
-
34
- :copyright: Copyright 2012 by Simon Sapin
35
- :license: BSD, see LICENSE for details.
36
-
37
- """
38
-
39
- # XXX Do not edit!
40
- # This file is automatically generated by mklabels.py
41
-
42
- LABELS = {
43
- ''']
44
- labels = [
45
- (repr(assert_lower(label)).lstrip('u'),
46
- repr(encoding['name']).lstrip('u'))
47
- for category in json.loads(urlopen(url).read().decode('ascii'))
48
- for encoding in category['encodings']
49
- for label in encoding['labels']]
50
- max_len = max(len(label) for label, name in labels)
51
- parts.extend(
52
- ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
53
- for label, name in labels)
54
- parts.append('}')
55
- return ''.join(parts)
56
-
57
-
58
- if __name__ == '__main__':
59
- print(generate('http://encoding.spec.whatwg.org/encodings.json'))
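The module this script regenerates maps every WHATWG encoding label to its canonical name; a small sketch of consuming the generated table, assuming the sibling ``webencodings.labels`` module is importable::

    from webencodings.labels import LABELS  # the module regenerated by mklabels.py

    print(LABELS["utf8"])               # -> 'utf-8'
    print(LABELS["unicode-1-1-utf-8"])  # -> 'utf-8'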
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/jaraco/functools.py DELETED
@@ -1,525 +0,0 @@
1
- import functools
2
- import time
3
- import inspect
4
- import collections
5
- import types
6
- import itertools
7
-
8
- from pkg_resources.extern import more_itertools
9
-
10
- from typing import Callable, TypeVar
11
-
12
-
13
- CallableT = TypeVar("CallableT", bound=Callable[..., object])
14
-
15
-
16
- def compose(*funcs):
17
- """
18
- Compose any number of unary functions into a single unary function.
19
-
20
- >>> import textwrap
21
- >>> expected = str.strip(textwrap.dedent(compose.__doc__))
22
- >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
23
- >>> strip_and_dedent(compose.__doc__) == expected
24
- True
25
-
26
- Compose also allows the innermost function to take arbitrary arguments.
27
-
28
- >>> round_three = lambda x: round(x, ndigits=3)
29
- >>> f = compose(round_three, int.__truediv__)
30
- >>> [f(3*x, x+1) for x in range(1,10)]
31
- [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
32
- """
33
-
34
- def compose_two(f1, f2):
35
- return lambda *args, **kwargs: f1(f2(*args, **kwargs))
36
-
37
- return functools.reduce(compose_two, funcs)
38
-
39
-
40
- def method_caller(method_name, *args, **kwargs):
41
- """
42
- Return a function that will call a named method on the
43
- target object with optional positional and keyword
44
- arguments.
45
-
46
- >>> lower = method_caller('lower')
47
- >>> lower('MyString')
48
- 'mystring'
49
- """
50
-
51
- def call_method(target):
52
- func = getattr(target, method_name)
53
- return func(*args, **kwargs)
54
-
55
- return call_method
56
-
57
-
58
- def once(func):
59
- """
60
- Decorate func so it's only ever called the first time.
61
-
62
- This decorator can ensure that an expensive or non-idempotent function
63
- will not be expensive on subsequent calls and is idempotent.
64
-
65
- >>> add_three = once(lambda a: a+3)
66
- >>> add_three(3)
67
- 6
68
- >>> add_three(9)
69
- 6
70
- >>> add_three('12')
71
- 6
72
-
73
- To reset the stored value, simply clear the property ``saved_result``.
74
-
75
- >>> del add_three.saved_result
76
- >>> add_three(9)
77
- 12
78
- >>> add_three(8)
79
- 12
80
-
81
- Or invoke 'reset()' on it.
82
-
83
- >>> add_three.reset()
84
- >>> add_three(-3)
85
- 0
86
- >>> add_three(0)
87
- 0
88
- """
89
-
90
- @functools.wraps(func)
91
- def wrapper(*args, **kwargs):
92
- if not hasattr(wrapper, 'saved_result'):
93
- wrapper.saved_result = func(*args, **kwargs)
94
- return wrapper.saved_result
95
-
96
- wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
97
- return wrapper
98
-
99
-
100
- def method_cache(
101
- method: CallableT,
102
- cache_wrapper: Callable[
103
- [CallableT], CallableT
104
- ] = functools.lru_cache(), # type: ignore[assignment]
105
- ) -> CallableT:
106
- """
107
- Wrap lru_cache to support storing the cache data in the object instances.
108
-
109
- Abstracts the common paradigm where the method explicitly saves an
110
- underscore-prefixed protected property on first call and returns that
111
- subsequently.
112
-
113
- >>> class MyClass:
114
- ... calls = 0
115
- ...
116
- ... @method_cache
117
- ... def method(self, value):
118
- ... self.calls += 1
119
- ... return value
120
-
121
- >>> a = MyClass()
122
- >>> a.method(3)
123
- 3
124
- >>> for x in range(75):
125
- ... res = a.method(x)
126
- >>> a.calls
127
- 75
128
-
129
- Note that the apparent behavior will be exactly like that of lru_cache
130
- except that the cache is stored on each instance, so values in one
131
- instance will not flush values from another, and when an instance is
132
- deleted, so are the cached values for that instance.
133
-
134
- >>> b = MyClass()
135
- >>> for x in range(35):
136
- ... res = b.method(x)
137
- >>> b.calls
138
- 35
139
- >>> a.method(0)
140
- 0
141
- >>> a.calls
142
- 75
143
-
144
- Note that if method had been decorated with ``functools.lru_cache()``,
145
- a.calls would have been 76 (due to the cached value of 0 having been
146
- flushed by the 'b' instance).
147
-
148
- Clear the cache with ``.cache_clear()``
149
-
150
- >>> a.method.cache_clear()
151
-
152
- Same for a method that hasn't yet been called.
153
-
154
- >>> c = MyClass()
155
- >>> c.method.cache_clear()
156
-
157
- Another cache wrapper may be supplied:
158
-
159
- >>> cache = functools.lru_cache(maxsize=2)
160
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
161
- >>> a = MyClass()
162
- >>> a.method2()
163
- 3
164
-
165
- Caution - do not subsequently wrap the method with another decorator, such
166
- as ``@property``, which changes the semantics of the function.
167
-
168
- See also
169
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
170
- for another implementation and additional justification.
171
- """
172
-
173
- def wrapper(self: object, *args: object, **kwargs: object) -> object:
174
- # it's the first call, replace the method with a cached, bound method
175
- bound_method: CallableT = types.MethodType( # type: ignore[assignment]
176
- method, self
177
- )
178
- cached_method = cache_wrapper(bound_method)
179
- setattr(self, method.__name__, cached_method)
180
- return cached_method(*args, **kwargs)
181
-
182
- # Support cache clear even before cache has been created.
183
- wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
184
-
185
- return ( # type: ignore[return-value]
186
- _special_method_cache(method, cache_wrapper) or wrapper
187
- )
188
-
189
-
190
- def _special_method_cache(method, cache_wrapper):
191
- """
192
- Because Python treats special methods differently, it's not
193
- possible to use instance attributes to implement the cached
194
- methods.
195
-
196
- Instead, install the wrapper method under a different name
197
- and return a simple proxy to that wrapper.
198
-
199
- https://github.com/jaraco/jaraco.functools/issues/5
200
- """
201
- name = method.__name__
202
- special_names = '__getattr__', '__getitem__'
203
- if name not in special_names:
204
- return
205
-
206
- wrapper_name = '__cached' + name
207
-
208
- def proxy(self, *args, **kwargs):
209
- if wrapper_name not in vars(self):
210
- bound = types.MethodType(method, self)
211
- cache = cache_wrapper(bound)
212
- setattr(self, wrapper_name, cache)
213
- else:
214
- cache = getattr(self, wrapper_name)
215
- return cache(*args, **kwargs)
216
-
217
- return proxy
218
-
219
-
220
- def apply(transform):
221
- """
222
- Decorate a function with a transform function that is
223
- invoked on results returned from the decorated function.
224
-
225
- >>> @apply(reversed)
226
- ... def get_numbers(start):
227
- ... "doc for get_numbers"
228
- ... return range(start, start+3)
229
- >>> list(get_numbers(4))
230
- [6, 5, 4]
231
- >>> get_numbers.__doc__
232
- 'doc for get_numbers'
233
- """
234
-
235
- def wrap(func):
236
- return functools.wraps(func)(compose(transform, func))
237
-
238
- return wrap
239
-
240
-
241
- def result_invoke(action):
242
- r"""
243
- Decorate a function with an action function that is
244
- invoked on the results returned from the decorated
245
- function (for its side-effect), then return the original
246
- result.
247
-
248
- >>> @result_invoke(print)
249
- ... def add_two(a, b):
250
- ... return a + b
251
- >>> x = add_two(2, 3)
252
- 5
253
- >>> x
254
- 5
255
- """
256
-
257
- def wrap(func):
258
- @functools.wraps(func)
259
- def wrapper(*args, **kwargs):
260
- result = func(*args, **kwargs)
261
- action(result)
262
- return result
263
-
264
- return wrapper
265
-
266
- return wrap
267
-
268
-
269
- def call_aside(f, *args, **kwargs):
270
- """
271
- Call a function for its side effect after initialization.
272
-
273
- >>> @call_aside
274
- ... def func(): print("called")
275
- called
276
- >>> func()
277
- called
278
-
279
- Use functools.partial to pass parameters to the initial call
280
-
281
- >>> @functools.partial(call_aside, name='bingo')
282
- ... def func(name): print("called with", name)
283
- called with bingo
284
- """
285
- f(*args, **kwargs)
286
- return f
287
-
288
-
289
- class Throttler:
290
- """
291
- Rate-limit a function (or other callable)
292
- """
293
-
294
- def __init__(self, func, max_rate=float('Inf')):
295
- if isinstance(func, Throttler):
296
- func = func.func
297
- self.func = func
298
- self.max_rate = max_rate
299
- self.reset()
300
-
301
- def reset(self):
302
- self.last_called = 0
303
-
304
- def __call__(self, *args, **kwargs):
305
- self._wait()
306
- return self.func(*args, **kwargs)
307
-
308
- def _wait(self):
309
- "ensure at least 1/max_rate seconds from last call"
310
- elapsed = time.time() - self.last_called
311
- must_wait = 1 / self.max_rate - elapsed
312
- time.sleep(max(0, must_wait))
313
- self.last_called = time.time()
314
-
315
- def __get__(self, obj, type=None):
316
- return first_invoke(self._wait, functools.partial(self.func, obj))
317
-
318
-
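``Throttler`` carries no doctest above; a minimal sketch, assuming the standalone ``jaraco.functools`` package rather than this vendored copy::

    import time
    from jaraco.functools import Throttler

    slow_print = Throttler(print, max_rate=2)  # at most 2 calls per second
    start = time.time()
    for i in range(4):
        slow_print(i)
    print(round(time.time() - start, 1))  # roughly 1.5: the first call is free,
                                          # each later call waits up to 0.5s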
319
- def first_invoke(func1, func2):
320
- """
321
- Return a function that when invoked will invoke func1 without
322
- any parameters (for its side-effect) and then invoke func2
323
- with whatever parameters were passed, returning its result.
324
- """
325
-
326
- def wrapper(*args, **kwargs):
327
- func1()
328
- return func2(*args, **kwargs)
329
-
330
- return wrapper
331
-
332
-
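A minimal sketch of ``first_invoke`` under the same standalone-package assumption::

    from jaraco.functools import first_invoke

    ready = first_invoke(lambda: print("connecting..."), lambda x: x * 2)
    print(ready(21))  # prints "connecting..." first, then 42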
333
- def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
334
- """
335
- Given a callable func, trap the indicated exceptions
336
- for up to 'retries' times, invoking cleanup on the
337
- exception. On the final attempt, allow any exceptions
338
- to propagate.
339
- """
340
- attempts = itertools.count() if retries == float('inf') else range(retries)
341
- for attempt in attempts:
342
- try:
343
- return func()
344
- except trap:
345
- cleanup()
346
-
347
- return func()
348
-
349
-
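A minimal sketch of ``retry_call`` trapping transient failures, under the same standalone-package assumption::

    from jaraco.functools import retry_call

    calls = []

    def flaky():
        calls.append(1)
        if len(calls) < 3:
            raise ValueError("transient")
        return "ok"

    # two trapped failures, then success on the third attempt
    print(retry_call(flaky, retries=3, trap=ValueError))  # -> 'ok'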
350
- def retry(*r_args, **r_kwargs):
351
- """
352
- Decorator wrapper for retry_call. Accepts arguments to retry_call
353
- except func and then returns a decorator for the decorated function.
354
-
355
- Ex:
356
-
357
- >>> @retry(retries=3)
358
- ... def my_func(a, b):
359
- ... "this is my funk"
360
- ... print(a, b)
361
- >>> my_func.__doc__
362
- 'this is my funk'
363
- """
364
-
365
- def decorate(func):
366
- @functools.wraps(func)
367
- def wrapper(*f_args, **f_kwargs):
368
- bound = functools.partial(func, *f_args, **f_kwargs)
369
- return retry_call(bound, *r_args, **r_kwargs)
370
-
371
- return wrapper
372
-
373
- return decorate
374
-
375
-
376
- def print_yielded(func):
377
- """
378
- Convert a generator into a function that prints all yielded elements
379
-
380
- >>> @print_yielded
381
- ... def x():
382
- ... yield 3; yield None
383
- >>> x()
384
- 3
385
- None
386
- """
387
- print_all = functools.partial(map, print)
388
- print_results = compose(more_itertools.consume, print_all, func)
389
- return functools.wraps(func)(print_results)
390
-
391
-
392
- def pass_none(func):
393
- """
394
- Wrap func so it's not called if its first param is None
395
-
396
- >>> print_text = pass_none(print)
397
- >>> print_text('text')
398
- text
399
- >>> print_text(None)
400
- """
401
-
402
- @functools.wraps(func)
403
- def wrapper(param, *args, **kwargs):
404
- if param is not None:
405
- return func(param, *args, **kwargs)
406
-
407
- return wrapper
408
-
409
-
410
- def assign_params(func, namespace):
411
- """
412
- Assign parameters from namespace where func solicits.
413
-
414
- >>> def func(x, y=3):
415
- ... print(x, y)
416
- >>> assigned = assign_params(func, dict(x=2, z=4))
417
- >>> assigned()
418
- 2 3
419
-
420
- The usual errors are raised if a function doesn't receive
421
- its required parameters:
422
-
423
- >>> assigned = assign_params(func, dict(y=3, z=4))
424
- >>> assigned()
425
- Traceback (most recent call last):
426
- TypeError: func() ...argument...
427
-
428
- It even works on methods:
429
-
430
- >>> class Handler:
431
- ... def meth(self, arg):
432
- ... print(arg)
433
- >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
434
- crystal
435
- """
436
- sig = inspect.signature(func)
437
- params = sig.parameters.keys()
438
- call_ns = {k: namespace[k] for k in params if k in namespace}
439
- return functools.partial(func, **call_ns)
440
-
441
-
442
- def save_method_args(method):
443
- """
444
- Wrap a method such that when it is called, the args and kwargs are
445
- saved on the method.
446
-
447
- >>> class MyClass:
448
- ... @save_method_args
449
- ... def method(self, a, b):
450
- ... print(a, b)
451
- >>> my_ob = MyClass()
452
- >>> my_ob.method(1, 2)
453
- 1 2
454
- >>> my_ob._saved_method.args
455
- (1, 2)
456
- >>> my_ob._saved_method.kwargs
457
- {}
458
- >>> my_ob.method(a=3, b='foo')
459
- 3 foo
460
- >>> my_ob._saved_method.args
461
- ()
462
- >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
463
- True
464
-
465
- The arguments are stored on the instance, allowing for
466
- different instance to save different args.
467
-
468
- >>> your_ob = MyClass()
469
- >>> your_ob.method({str('x'): 3}, b=[4])
470
- {'x': 3} [4]
471
- >>> your_ob._saved_method.args
472
- ({'x': 3},)
473
- >>> my_ob._saved_method.args
474
- ()
475
- """
476
- args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
477
-
478
- @functools.wraps(method)
479
- def wrapper(self, *args, **kwargs):
480
- attr_name = '_saved_' + method.__name__
481
- attr = args_and_kwargs(args, kwargs)
482
- setattr(self, attr_name, attr)
483
- return method(self, *args, **kwargs)
484
-
485
- return wrapper
486
-
487
-
488
- def except_(*exceptions, replace=None, use=None):
489
- """
490
- Replace the indicated exceptions, if raised, with the indicated
491
- literal replacement or evaluated expression (if present).
492
-
493
- >>> safe_int = except_(ValueError)(int)
494
- >>> safe_int('five')
495
- >>> safe_int('5')
496
- 5
497
-
498
- Specify a literal replacement with ``replace``.
499
-
500
- >>> safe_int_r = except_(ValueError, replace=0)(int)
501
- >>> safe_int_r('five')
502
- 0
503
-
504
- Provide an expression to ``use`` to pass through particular parameters.
505
-
506
- >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
507
- >>> safe_int_pt('five')
508
- 'five'
509
-
510
- """
511
-
512
- def decorate(func):
513
- @functools.wraps(func)
514
- def wrapper(*args, **kwargs):
515
- try:
516
- return func(*args, **kwargs)
517
- except exceptions:
518
- try:
519
- return eval(use)
520
- except TypeError:
521
- return replace
522
-
523
- return wrapper
524
-
525
- return decorate
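As context for the hunk above, a minimal usage sketch of the deleted helpers, assuming `retry`, `pass_none`, and `assign_params` are in scope exactly as defined there:

```python
# Usage sketch for the deleted helpers; assumes retry, pass_none and
# assign_params from the hunk above are importable.
import random

@retry(retries=2, trap=(ValueError,), cleanup=lambda: print('retrying...'))
def flaky():
    # Fails at random; retried up to twice before exceptions propagate.
    if random.random() < 0.5:
        raise ValueError('transient failure')
    return 'ok'

safe_upper = pass_none(str.upper)
assert safe_upper(None) is None      # short-circuits on None
assert safe_upper('hi') == 'HI'

def greet(name, punctuation='!'):
    return f'hello {name}{punctuation}'

# Only parameters the function solicits are taken from the namespace.
bound = assign_params(greet, {'name': 'world', 'unused': 1})
assert bound() == 'hello world!'
```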
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/util.py DELETED
@@ -1,235 +0,0 @@
- # util.py
- import warnings
- import types
- import collections
- import itertools
- from functools import lru_cache
- from typing import List, Union, Iterable
-
- _bslash = chr(92)
-
-
- class __config_flags:
-     """Internal class for defining compatibility and debugging flags"""
-
-     _all_names: List[str] = []
-     _fixed_names: List[str] = []
-     _type_desc = "configuration"
-
-     @classmethod
-     def _set(cls, dname, value):
-         if dname in cls._fixed_names:
-             warnings.warn(
-                 "{}.{} {} is {} and cannot be overridden".format(
-                     cls.__name__,
-                     dname,
-                     cls._type_desc,
-                     str(getattr(cls, dname)).upper(),
-                 )
-             )
-             return
-         if dname in cls._all_names:
-             setattr(cls, dname, value)
-         else:
-             raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
-
-     enable = classmethod(lambda cls, name: cls._set(name, True))
-     disable = classmethod(lambda cls, name: cls._set(name, False))
-
-
- @lru_cache(maxsize=128)
- def col(loc: int, strg: str) -> int:
-     """
-     Returns current column within a string, counting newlines as line separators.
-     The first column is number 1.
-
-     Note: the default parsing behavior is to expand tabs in the input string
-     before starting the parsing process. See
-     :class:`ParserElement.parseString` for more
-     information on parsing strings containing ``<TAB>`` s, and suggested
-     methods to maintain a consistent view of the parsed string, the parse
-     location, and line and column positions within the parsed string.
-     """
-     s = strg
-     return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
-
-
- @lru_cache(maxsize=128)
- def lineno(loc: int, strg: str) -> int:
-     """Returns current line number within a string, counting newlines as line separators.
-     The first line is number 1.
-
-     Note - the default parsing behavior is to expand tabs in the input string
-     before starting the parsing process. See :class:`ParserElement.parseString`
-     for more information on parsing strings containing ``<TAB>`` s, and
-     suggested methods to maintain a consistent view of the parsed string, the
-     parse location, and line and column positions within the parsed string.
-     """
-     return strg.count("\n", 0, loc) + 1
-
-
- @lru_cache(maxsize=128)
- def line(loc: int, strg: str) -> str:
-     """
-     Returns the line of text containing loc within a string, counting newlines as line separators.
-     """
-     last_cr = strg.rfind("\n", 0, loc)
-     next_cr = strg.find("\n", loc)
-     return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
-
-
- class _UnboundedCache:
-     def __init__(self):
-         cache = {}
-         cache_get = cache.get
-         self.not_in_cache = not_in_cache = object()
-
-         def get(_, key):
-             return cache_get(key, not_in_cache)
-
-         def set_(_, key, value):
-             cache[key] = value
-
-         def clear(_):
-             cache.clear()
-
-         self.size = None
-         self.get = types.MethodType(get, self)
-         self.set = types.MethodType(set_, self)
-         self.clear = types.MethodType(clear, self)
-
-
- class _FifoCache:
-     def __init__(self, size):
-         self.not_in_cache = not_in_cache = object()
-         cache = collections.OrderedDict()
-         cache_get = cache.get
-
-         def get(_, key):
-             return cache_get(key, not_in_cache)
-
-         def set_(_, key, value):
-             cache[key] = value
-             while len(cache) > size:
-                 cache.popitem(last=False)
-
-         def clear(_):
-             cache.clear()
-
-         self.size = size
-         self.get = types.MethodType(get, self)
-         self.set = types.MethodType(set_, self)
-         self.clear = types.MethodType(clear, self)
-
-
- class LRUMemo:
-     """
-     A memoizing mapping that retains `capacity` deleted items
-
-     The memo tracks retained items by their access order; once `capacity` items
-     are retained, the least recently used item is discarded.
-     """
-
-     def __init__(self, capacity):
-         self._capacity = capacity
-         self._active = {}
-         self._memory = collections.OrderedDict()
-
-     def __getitem__(self, key):
-         try:
-             return self._active[key]
-         except KeyError:
-             self._memory.move_to_end(key)
-             return self._memory[key]
-
-     def __setitem__(self, key, value):
-         self._memory.pop(key, None)
-         self._active[key] = value
-
-     def __delitem__(self, key):
-         try:
-             value = self._active.pop(key)
-         except KeyError:
-             pass
-         else:
-             while len(self._memory) >= self._capacity:
-                 self._memory.popitem(last=False)
-             self._memory[key] = value
-
-     def clear(self):
-         self._active.clear()
-         self._memory.clear()
-
-
- class UnboundedMemo(dict):
-     """
-     A memoizing mapping that retains all deleted items
-     """
-
-     def __delitem__(self, key):
-         pass
-
-
- def _escape_regex_range_chars(s: str) -> str:
-     # escape these chars: ^-[]
-     for c in r"\^-[]":
-         s = s.replace(c, _bslash + c)
-     s = s.replace("\n", r"\n")
-     s = s.replace("\t", r"\t")
-     return str(s)
-
-
- def _collapse_string_to_ranges(
-     s: Union[str, Iterable[str]], re_escape: bool = True
- ) -> str:
-     def is_consecutive(c):
-         c_int = ord(c)
-         is_consecutive.prev, prev = c_int, is_consecutive.prev
-         if c_int - prev > 1:
-             is_consecutive.value = next(is_consecutive.counter)
-         return is_consecutive.value
-
-     is_consecutive.prev = 0
-     is_consecutive.counter = itertools.count()
-     is_consecutive.value = -1
-
-     def escape_re_range_char(c):
-         return "\\" + c if c in r"\^-][" else c
-
-     def no_escape_re_range_char(c):
-         return c
-
-     if not re_escape:
-         escape_re_range_char = no_escape_re_range_char
-
-     ret = []
-     s = "".join(sorted(set(s)))
-     if len(s) > 3:
-         for _, chars in itertools.groupby(s, key=is_consecutive):
-             first = last = next(chars)
-             last = collections.deque(
-                 itertools.chain(iter([last]), chars), maxlen=1
-             ).pop()
-             if first == last:
-                 ret.append(escape_re_range_char(first))
-             else:
-                 sep = "" if ord(last) == ord(first) + 1 else "-"
-                 ret.append(
-                     "{}{}{}".format(
-                         escape_re_range_char(first), sep, escape_re_range_char(last)
-                     )
-                 )
-     else:
-         ret = [escape_re_range_char(c) for c in s]
-
-     return "".join(ret)
-
-
- def _flatten(ll: list) -> list:
-     ret = []
-     for i in ll:
-         if isinstance(i, list):
-             ret.extend(_flatten(i))
-         else:
-             ret.append(i)
-     return ret
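A small sketch of the location helpers and the range collapser deleted above, with the values they produce (assuming the functions from this hunk are in scope):

```python
# Usage sketch for the pyparsing helpers deleted above; assumes col, lineno,
# line and _collapse_string_to_ranges from this hunk are in scope.
s = "abc\ndef"
loc = s.index("e")          # position of 'e' on the second line
assert lineno(loc, s) == 2  # lines are 1-based
assert col(loc, s) == 2     # columns are 1-based
assert line(loc, s) == "def"

# Runs of consecutive characters collapse into regex-style ranges.
assert _collapse_string_to_ranges("abcdefz") == "a-fz"
```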
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/packaging/README.md DELETED
@@ -1,17 +0,0 @@
-
- ## To build a cu101 wheel for release:
-
- ```
- $ nvidia-docker run -it --storage-opt "size=20GB" --name pt pytorch/manylinux-cuda101
- # inside the container:
- # git clone https://github.com/facebookresearch/detectron2/
- # cd detectron2
- # export CU_VERSION=cu101 D2_VERSION_SUFFIX= PYTHON_VERSION=3.7 PYTORCH_VERSION=1.4
- # ./dev/packaging/build_wheel.sh
- ```
-
- ## To build all wheels for `CUDA {9.2,10.0,10.1}` x `Python {3.6,3.7,3.8}`:
- ```
- ./dev/packaging/build_all_wheels.sh
- ./dev/packaging/gen_wheel_index.sh /path/to/wheels
- ```
spaces/CVPR/GFPGAN-example/inference_gfpgan.py DELETED
@@ -1,116 +0,0 @@
- import argparse
- import cv2
- import glob
- import numpy as np
- import os
- import torch
- from basicsr.utils import imwrite
-
- from gfpgan import GFPGANer
-
-
- def main():
-     """Inference demo for GFPGAN.
-     """
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image')
-     parser.add_argument('--arch', type=str, default='clean', help='The GFPGAN architecture. Option: clean | original')
-     parser.add_argument('--channel', type=int, default=2, help='Channel multiplier for large networks of StyleGAN2')
-     parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth')
-     parser.add_argument('--bg_upsampler', type=str, default='realesrgan', help='background upsampler')
-     parser.add_argument(
-         '--bg_tile', type=int, default=400, help='Tile size for background sampler, 0 for no tile during testing')
-     parser.add_argument('--test_path', type=str, default='inputs/whole_imgs', help='Input folder')
-     parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces')
-     parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face')
-     parser.add_argument('--aligned', action='store_true', help='Input are aligned faces')
-     parser.add_argument('--paste_back', action='store_false', help='Paste the restored faces back to images')
-     parser.add_argument('--save_root', type=str, default='results', help='Path to save root')
-     parser.add_argument(
-         '--ext',
-         type=str,
-         default='auto',
-         help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
-     args = parser.parse_args()
-
-     if args.test_path.endswith('/'):
-         args.test_path = args.test_path[:-1]
-     os.makedirs(args.save_root, exist_ok=True)
-
-     # background upsampler
-     if args.bg_upsampler == 'realesrgan':
-         if not torch.cuda.is_available():  # CPU
-             import warnings
-             warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. '
-                           'If you really want to use it, please modify the corresponding codes.')
-             bg_upsampler = None
-         else:
-             from basicsr.archs.rrdbnet_arch import RRDBNet
-             from realesrgan import RealESRGANer
-             model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
-             bg_upsampler = RealESRGANer(
-                 scale=2,
-                 model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
-                 model=model,
-                 tile=args.bg_tile,
-                 tile_pad=10,
-                 pre_pad=0,
-                 half=True)  # need to set False in CPU mode
-     else:
-         bg_upsampler = None
-     # set up GFPGAN restorer
-     restorer = GFPGANer(
-         model_path=args.model_path,
-         upscale=args.upscale,
-         arch=args.arch,
-         channel_multiplier=args.channel,
-         bg_upsampler=bg_upsampler)
-
-     img_list = sorted(glob.glob(os.path.join(args.test_path, '*')))
-     for img_path in img_list:
-         # read image
-         img_name = os.path.basename(img_path)
-         print(f'Processing {img_name} ...')
-         basename, ext = os.path.splitext(img_name)
-         input_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
-
-         # restore faces and background if necessary
-         cropped_faces, restored_faces, restored_img = restorer.enhance(
-             input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=args.paste_back)
-
-         # save faces
-         for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)):
-             # save cropped face
-             save_crop_path = os.path.join(args.save_root, 'cropped_faces', f'{basename}_{idx:02d}.png')
-             imwrite(cropped_face, save_crop_path)
-             # save restored face
-             if args.suffix is not None:
-                 save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png'
-             else:
-                 save_face_name = f'{basename}_{idx:02d}.png'
-             save_restore_path = os.path.join(args.save_root, 'restored_faces', save_face_name)
-             imwrite(restored_face, save_restore_path)
-             # save comparison image
-             cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
-             imwrite(cmp_img, os.path.join(args.save_root, 'cmp', f'{basename}_{idx:02d}.png'))
-
-         # save restored img
-         if restored_img is not None:
-             if args.ext == 'auto':
-                 extension = ext[1:]
-             else:
-                 extension = args.ext
-
-             if args.suffix is not None:
-                 save_restore_path = os.path.join(args.save_root, 'restored_imgs',
-                                                  f'{basename}_{args.suffix}.{extension}')
-             else:
-                 save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}.{extension}')
-             imwrite(restored_img, save_restore_path)
-
-     print(f'Results are in the [{args.save_root}] folder.')
-
-
- if __name__ == '__main__':
-     main()
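A hypothetical minimal driver mirroring what the deleted script does; the model path and input image below are placeholders, not verified assets:

```python
# Minimal GFPGANer sketch following the deleted script; paths are placeholders.
import cv2
from gfpgan import GFPGANer

restorer = GFPGANer(
    model_path='experiments/pretrained_models/GFPGANCleanv1-NoCE-C2.pth',  # placeholder
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None,  # the script also skips RealESRGAN on CPU
)
img = cv2.imread('inputs/whole_imgs/example.jpg', cv2.IMREAD_COLOR)  # placeholder
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, only_center_face=False, paste_back=True)
```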
spaces/CVPR/LIVE/thrust/thrust/detail/copy_if.h DELETED
@@ -1,75 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/execution_policy.h>
-
- namespace thrust
- {
-
-
- template<typename DerivedPolicy,
-          typename InputIterator,
-          typename OutputIterator,
-          typename Predicate>
- __host__ __device__
-   OutputIterator copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                          InputIterator first,
-                          InputIterator last,
-                          OutputIterator result,
-                          Predicate pred);
-
-
- template<typename DerivedPolicy,
-          typename InputIterator1,
-          typename InputIterator2,
-          typename OutputIterator,
-          typename Predicate>
- __host__ __device__
-   OutputIterator copy_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                          InputIterator1 first,
-                          InputIterator1 last,
-                          InputIterator2 stencil,
-                          OutputIterator result,
-                          Predicate pred);
-
-
- template<typename InputIterator,
-          typename OutputIterator,
-          typename Predicate>
-   OutputIterator copy_if(InputIterator first,
-                          InputIterator last,
-                          OutputIterator result,
-                          Predicate pred);
-
-
- template<typename InputIterator1,
-          typename InputIterator2,
-          typename OutputIterator,
-          typename Predicate>
-   OutputIterator copy_if(InputIterator1 first,
-                          InputIterator1 last,
-                          InputIterator2 stencil,
-                          OutputIterator result,
-                          Predicate pred);
-
-
- } // end thrust
-
- #include <thrust/detail/copy_if.inl>
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/sequence.h DELETED
@@ -1,44 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // the purpose of this header is to #include the sequence.h header
- // of the sequential, host, and device systems. It should be #included in any
- // code which uses adl to dispatch sequence
-
- #include <thrust/system/detail/sequential/sequence.h>
-
- // SCons can't see through the #defines below to figure out what this header
- // includes, so we fake it out by specifying all possible files we might end up
- // including inside an #if 0.
- #if 0
- #include <thrust/system/cpp/detail/sequence.h>
- #include <thrust/system/cuda/detail/sequence.h>
- #include <thrust/system/omp/detail/sequence.h>
- #include <thrust/system/tbb/detail/sequence.h>
- #endif
-
- #define __THRUST_HOST_SYSTEM_SEQUENCE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/sequence.h>
- #include __THRUST_HOST_SYSTEM_SEQUENCE_HEADER
- #undef __THRUST_HOST_SYSTEM_SEQUENCE_HEADER
-
- #define __THRUST_DEVICE_SYSTEM_SEQUENCE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/sequence.h>
- #include __THRUST_DEVICE_SYSTEM_SEQUENCE_HEADER
- #undef __THRUST_DEVICE_SYSTEM_SEQUENCE_HEADER
-
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/temporary_buffer.h DELETED
@@ -1,22 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- // this system has no special temporary buffer functions
-
spaces/CVPR/LIVE/thrust/thrust/system_error.h DELETED
@@ -1,51 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
- /*! \file thrust/system_error.h
-  *  \brief System diagnostics
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- namespace thrust
- {
-
- /*! \addtogroup system
-  *  \{
-  */
-
- /*! \namespace thrust::system
-  *  \brief \p thrust::system is the namespace which contains functionality for manipulating
-  *         memory specific to one of Thrust's backend systems. It also contains functionality
-  *         for reporting error conditions originating from the operating system or other
-  *         low-level application program interfaces such as the CUDA runtime.
-  *         They are provided in a separate namespace for import convenience but are
-  *         also aliased in the top-level \p thrust namespace for easy access.
-  */
- namespace system
- {
- } // end system
-
- /*! \} // end system
-  */
-
- } // end thrust
-
- #include <thrust/system/error_code.h>
- #include <thrust/system/system_error.h>
-
spaces/CVPR/SPOTER_Sign_Language_Recognition/app.py DELETED
@@ -1,181 +0,0 @@
- import copy
-
- import torch
- import numpy as np
- import gradio as gr
- from spoter_mod.skeleton_extractor import obtain_pose_data
- from spoter_mod.normalization.body_normalization import normalize_single_dict as normalize_single_body_dict, BODY_IDENTIFIERS
- from spoter_mod.normalization.hand_normalization import normalize_single_dict as normalize_single_hand_dict, HAND_IDENTIFIERS
-
-
- model = torch.load("spoter-checkpoint.pth", map_location=torch.device('cpu'))
- model.train(False)
-
- HAND_IDENTIFIERS = [id + "_Left" for id in HAND_IDENTIFIERS] + [id + "_Right" for id in HAND_IDENTIFIERS]
- GLOSS = ['book', 'drink', 'computer', 'before', 'chair', 'go', 'clothes', 'who', 'candy', 'cousin', 'deaf', 'fine',
-          'help', 'no', 'thin', 'walk', 'year', 'yes', 'all', 'black', 'cool', 'finish', 'hot', 'like', 'many', 'mother',
-          'now', 'orange', 'table', 'thanksgiving', 'what', 'woman', 'bed', 'blue', 'bowling', 'can', 'dog', 'family',
-          'fish', 'graduate', 'hat', 'hearing', 'kiss', 'language', 'later', 'man', 'shirt', 'study', 'tall', 'white',
-          'wrong', 'accident', 'apple', 'bird', 'change', 'color', 'corn', 'cow', 'dance', 'dark', 'doctor', 'eat',
-          'enjoy', 'forget', 'give', 'last', 'meet', 'pink', 'pizza', 'play', 'school', 'secretary', 'short', 'time',
-          'want', 'work', 'africa', 'basketball', 'birthday', 'brown', 'but', 'cheat', 'city', 'cook', 'decide', 'full',
-          'how', 'jacket', 'letter', 'medicine', 'need', 'paint', 'paper', 'pull', 'purple', 'right', 'same', 'son',
-          'tell', 'thursday']
-
- device = torch.device("cpu")
- if torch.cuda.is_available():
-     device = torch.device("cuda")
-
-
- def tensor_to_dictionary(landmarks_tensor: torch.Tensor) -> dict:
-
-     data_array = landmarks_tensor.numpy()
-     output = {}
-
-     for landmark_index, identifier in enumerate(BODY_IDENTIFIERS + HAND_IDENTIFIERS):
-         output[identifier] = data_array[:, landmark_index]
-
-     return output
-
-
- def dictionary_to_tensor(landmarks_dict: dict) -> torch.Tensor:
-
-     output = np.empty(shape=(len(landmarks_dict["leftEar"]), len(BODY_IDENTIFIERS + HAND_IDENTIFIERS), 2))
-
-     for landmark_index, identifier in enumerate(BODY_IDENTIFIERS + HAND_IDENTIFIERS):
-         output[:, landmark_index, 0] = [frame[0] for frame in landmarks_dict[identifier]]
-         output[:, landmark_index, 1] = [frame[1] for frame in landmarks_dict[identifier]]
-
-     return torch.from_numpy(output)
-
-
- def greet(label, video0, video1):
-
-     if label == "Webcam":
-         video = video0
-
-     elif label == "Video":
-         video = video1
-
-     elif label == "X":
-         return {"A": 0.8, "B": 0.1, "C": 0.1}
-
-     else:
-         return {}
-
-     data = obtain_pose_data(video)
-
-     depth_map = np.empty(shape=(len(data.data_hub["nose_X"]), len(BODY_IDENTIFIERS + HAND_IDENTIFIERS), 2))
-
-     for index, identifier in enumerate(BODY_IDENTIFIERS + HAND_IDENTIFIERS):
-         depth_map[:, index, 0] = data.data_hub[identifier + "_X"]
-         depth_map[:, index, 1] = data.data_hub[identifier + "_Y"]
-
-     depth_map = torch.from_numpy(np.copy(depth_map))
-
-     depth_map = tensor_to_dictionary(depth_map)
-
-     keys = copy.copy(list(depth_map.keys()))
-     for key in keys:
-         data = depth_map[key]
-         del depth_map[key]
-         depth_map[key.replace("_Left", "_0").replace("_Right", "_1")] = data
-
-     depth_map = normalize_single_body_dict(depth_map)
-     depth_map = normalize_single_hand_dict(depth_map)
-
-     keys = copy.copy(list(depth_map.keys()))
-     for key in keys:
-         data = depth_map[key]
-         del depth_map[key]
-         depth_map[key.replace("_0", "_Left").replace("_1", "_Right")] = data
-
-     depth_map = dictionary_to_tensor(depth_map)
-
-     depth_map = depth_map - 0.5
-
-     inputs = depth_map.squeeze(0).to(device)
-     outputs = model(inputs).expand(1, -1, -1)
-     results = torch.nn.functional.softmax(outputs, dim=2).detach().numpy()[0, 0]
-
-     results = {GLOSS[i]: float(results[i]) for i in range(100)}
-
-     return results
-
-
- label = gr.outputs.Label(num_top_classes=5, label="Top class probabilities")
- demo = gr.Interface(fn=greet, inputs=[gr.Dropdown(["Webcam", "Video"], label="Please select the input type:", type="value"), gr.Video(source="webcam", label="Webcam recording", type="mp4"), gr.Video(source="upload", label="Video upload", type="mp4")], outputs=label,
-                     title="🤟 SPOTER Sign language recognition",
-                     description="""Current user interfaces are not accessible for D/deaf and hard-of-hearing users, whose natural communication medium is sign language. We work on AI systems for sign language to come closer to sign-driven technology and empower accessible apps, websites, and video conferencing platforms.
- Try out our recent model for sign language recognition right in your browser! The model below takes a video of a single sign in the American Sign Language at the input and provides you with probabilities of the lemmas (equivalent to words in natural language).
- ### Our work at CVPR
- Our efforts on lightweight and efficient models for sign language recognition were first introduced at WACV with our SPOTER paper. We now presented a work-in-progress follow-up here at CVPR's AVA workshop. Be sure to check our work and code below:
- - **WACV2022** - Original SPOTER paper - [Paper](https://openaccess.thecvf.com/content/WACV2022W/HADCV/papers/Bohacek_Sign_Pose-Based_Transformer_for_Word-Level_Sign_Language_Recognition_WACVW_2022_paper.pdf), [Code](https://github.com/matyasbohacek/spoter)
- - **CVPR2022 (AVA Workshop)** - Follow-up WIP – [Extended Abstract](https://drive.google.com/file/d/1Szbhi7ZwZ6VAWAcGcDDU6qV9Uj9xnDsS/view?usp=sharing), [Poster](https://drive.google.com/file/d/1_xvmTNbLjTrx6psKdsLkufAtfmI5wfbF/view?usp=sharing)
- ### How to sign?
- The model wrapped in this demo was trained on [WLASL100](https://dxli94.github.io/WLASL/), so it only knows selected ASL vocabulary. Take a look at these tutorial video examples (this is how you sign [computer](https://www.handspeak.com/word/search/index.php?id=449), [work](https://www.handspeak.com/word/search/index.php?id=2423), or [time](https://www.handspeak.com/word/search/index.php?id=2223)), try to replicate them yourself, and have them recognized using the webcam capture below. Have fun!
- > The demo can analyze webcam recordings or your uploaded videos. Before you hit Submit, **don't forget to select the input source in the dropdown first**.""",
-                     article="This is joint work of [Matyas Bohacek](https://scholar.google.cz/citations?user=wDy1xBwAAAAJ) and [Zhuo Cao](https://www.linkedin.com/in/zhuo-cao-b0787a1aa/?originalSubdomain=hk). For more info, visit [our website](https://www.signlanguagerecognition.com). To contact us, drop an e-mail [here](mailto:[email protected]).",
-                     css="""
-     @font-face {
-         font-family: Graphik;
-         font-weight: regular;
-         src: url("https://www.signlanguagerecognition.com/supplementary/GraphikRegular.otf") format("opentype");
-     }
-
-     @font-face {
-         font-family: Graphik;
-         font-weight: bold;
-         src: url("https://www.signlanguagerecognition.com/supplementary/GraphikBold.otf") format("opentype");
-     }
-
-     @font-face {
-         font-family: MonumentExpanded;
-         font-weight: regular;
-         src: url("https://www.signlanguagerecognition.com/supplementary/MonumentExtended-Regular.otf") format("opentype");
-     }
-
-     @font-face {
-         font-family: MonumentExpanded;
-         font-weight: bold;
-         src: url("https://www.signlanguagerecognition.com/supplementary/MonumentExtended-Bold.otf") format("opentype");
-     }
-
-     html {
-         font-family: "Graphik";
-     }
-
-     h1 {
-         font-family: "MonumentExpanded";
-     }
-
-     #12 {
-         - background-image: linear-gradient(to left, #61D836, #6CB346) !important;
-         background-color: #61D836 !important;
-     }
-
-     #12:hover {
-         - background-image: linear-gradient(to left, #61D836, #6CB346) !important;
-         background-color: #6CB346 !important;
-         border: 0 !important;
-         border-color: 0 !important;
-     }
-
-     .dark .gr-button-primary {
-         --tw-gradient-from: #61D836;
-         --tw-gradient-to: #6CB346;
-         border: 0 !important;
-         border-color: 0 !important;
-     }
-
-     .dark .gr-button-primary:hover {
-         --tw-gradient-from: #64A642;
-         --tw-gradient-to: #58933B;
-         border: 0 !important;
-         border-color: 0 !important;
-     }
-     """,
-                     cache_examples=True
-                     )
-
- demo.launch(debug=True)
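A sketch of the landmark round-trip the deleted app relies on, assuming `tensor_to_dictionary`, `dictionary_to_tensor`, and the identifier lists from this file are in scope (and that `leftEar` is among `BODY_IDENTIFIERS`, which `dictionary_to_tensor` requires):

```python
# Round-trip sketch for the landmark conversion helpers deleted above.
import torch

num_frames = 4
identifiers = BODY_IDENTIFIERS + HAND_IDENTIFIERS
landmarks = torch.rand(num_frames, len(identifiers), 2)

as_dict = tensor_to_dictionary(landmarks)   # identifier -> (num_frames, 2) array
round_trip = dictionary_to_tensor(as_dict)  # back to (num_frames, landmarks, 2)
assert round_trip.shape == landmarks.shape
```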
spaces/CVPR/lama-example/fetch_data/places_challenge_train_download.sh DELETED
@@ -1,14 +0,0 @@
- mkdir places_challenge_dataset
-
-
- declare -a TARPARTS
- for i in {a..z}
- do
-     TARPARTS[${#TARPARTS[@]}]="http://data.csail.mit.edu/places/places365/train_large_split/${i}.tar"
- done
- ls
- printf "%s\n" "${TARPARTS[@]}" > places_challenge_dataset/places365_train.txt
-
- cd places_challenge_dataset/
- xargs -a places365_train.txt -n 1 -P 8 wget [...]
- ls *.tar | xargs -i tar xvf {}
spaces/CVPR/regionclip-demo/detectron2/utils/env.py DELETED
@@ -1,170 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import importlib
- import importlib.util
- import logging
- import numpy as np
- import os
- import random
- import sys
- from datetime import datetime
- import torch
-
- __all__ = ["seed_all_rng"]
-
-
- TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2])
- """
- PyTorch version as a tuple of 2 ints. Useful for comparison.
- """
-
-
- DOC_BUILDING = os.getenv("_DOC_BUILDING", False)  # set in docs/conf.py
- """
- Whether we're building documentation.
- """
-
-
- def seed_all_rng(seed=None):
-     """
-     Set the random seed for the RNG in torch, numpy and python.
-
-     Args:
-         seed (int): if None, will use a strong random seed.
-     """
-     if seed is None:
-         seed = (
-             os.getpid()
-             + int(datetime.now().strftime("%S%f"))
-             + int.from_bytes(os.urandom(2), "big")
-         )
-         logger = logging.getLogger(__name__)
-         logger.info("Using a generated random seed {}".format(seed))
-     np.random.seed(seed)
-     torch.manual_seed(seed)
-     random.seed(seed)
-     os.environ["PYTHONHASHSEED"] = str(seed)
-
-
- # from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
- def _import_file(module_name, file_path, make_importable=False):
-     spec = importlib.util.spec_from_file_location(module_name, file_path)
-     module = importlib.util.module_from_spec(spec)
-     spec.loader.exec_module(module)
-     if make_importable:
-         sys.modules[module_name] = module
-     return module
-
-
- def _configure_libraries():
-     """
-     Configurations for some libraries.
-     """
-     # An environment option to disable `import cv2` globally,
-     # in case it leads to negative performance impact
-     disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False))
-     if disable_cv2:
-         sys.modules["cv2"] = None
-     else:
-         # Disable opencl in opencv since its interaction with cuda often has negative effects
-         # This envvar is supported after OpenCV 3.4.0
-         os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
-         try:
-             import cv2
-
-             if int(cv2.__version__.split(".")[0]) >= 3:
-                 cv2.ocl.setUseOpenCL(False)
-         except ModuleNotFoundError:
-             # Other types of ImportError, if happened, should not be ignored.
-             # Because a failed opencv import could mess up address space
-             # https://github.com/skvark/opencv-python/issues/381
-             pass
-
-     def get_version(module, digit=2):
-         return tuple(map(int, module.__version__.split(".")[:digit]))
-
-     # fmt: off
-     assert get_version(torch) >= (1, 4), "Requires torch>=1.4"
-     import fvcore
-     assert get_version(fvcore, 3) >= (0, 1, 2), "Requires fvcore>=0.1.2"
-     import yaml
-     assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1"
-     # fmt: on
-
-
- _ENV_SETUP_DONE = False
-
-
- def setup_environment():
-     """Perform environment setup work. The default setup is a no-op, but this
-     function allows the user to specify a Python source file or a module in
-     the $DETECTRON2_ENV_MODULE environment variable, that performs
-     custom setup work that may be necessary to their computing environment.
-     """
-     global _ENV_SETUP_DONE
-     if _ENV_SETUP_DONE:
-         return
-     _ENV_SETUP_DONE = True
-
-     _configure_libraries()
-
-     custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE")
-
-     if custom_module_path:
-         setup_custom_environment(custom_module_path)
-     else:
-         # The default setup is a no-op
-         pass
-
-
- def setup_custom_environment(custom_module):
-     """
-     Load custom environment setup by importing a Python source file or a
-     module, and run the setup function.
-     """
-     if custom_module.endswith(".py"):
-         module = _import_file("detectron2.utils.env.custom_module", custom_module)
-     else:
-         module = importlib.import_module(custom_module)
-     assert hasattr(module, "setup_environment") and callable(module.setup_environment), (
-         "Custom environment module defined in {} does not have the "
-         "required callable attribute 'setup_environment'."
-     ).format(custom_module)
-     module.setup_environment()
-
-
- def fixup_module_metadata(module_name, namespace, keys=None):
-     """
-     Fix the __qualname__ of module members to be their exported api name, so
-     when they are referenced in docs, sphinx can find them. Reference:
-     https://github.com/python-trio/trio/blob/6754c74eacfad9cc5c92d5c24727a2f3b620624e/trio/_util.py#L216-L241
-     """
-     if not DOC_BUILDING:
-         return
-     seen_ids = set()
-
-     def fix_one(qualname, name, obj):
-         # avoid infinite recursion (relevant when using
-         # typing.Generic, for example)
-         if id(obj) in seen_ids:
-             return
-         seen_ids.add(id(obj))
-
-         mod = getattr(obj, "__module__", None)
-         if mod is not None and (mod.startswith(module_name) or mod.startswith("fvcore.")):
-             obj.__module__ = module_name
-             # Modules, unlike everything else in Python, put fully-qualified
-             # names into their __name__ attribute. We check for "." to avoid
-             # rewriting these.
-             if hasattr(obj, "__name__") and "." not in obj.__name__:
-                 obj.__name__ = name
-                 obj.__qualname__ = qualname
-             if isinstance(obj, type):
-                 for attr_name, attr_value in obj.__dict__.items():
-                     fix_one(qualname + "." + attr_name, attr_name, attr_value)
-
-     if keys is None:
-         keys = namespace.keys()
-     for objname in keys:
-         if not objname.startswith("_"):
-             obj = namespace[objname]
-             fix_one(objname, objname, obj)
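A usage sketch for the utilities deleted above, assuming the module is importable under its repository path:

```python
# Sketch of the env utilities; the import path mirrors the file's location.
from detectron2.utils.env import TORCH_VERSION, seed_all_rng  # assumed import path

seed_all_rng(42)      # seeds torch, numpy, random, and PYTHONHASHSEED in one call
seed_all_rng()        # with no argument, derives a strong random seed and logs it
print(TORCH_VERSION)  # a 2-int tuple such as (1, 9), convenient for comparisons
```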
spaces/CVPR/transfiner/configs/new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ.py DELETED
@@ -1,14 +0,0 @@
- from .mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ import (
-     dataloader,
-     lr_multiplier,
-     model,
-     optimizer,
-     train,
- )
-
- train.max_iter *= 4  # 100ep -> 400ep
-
- lr_multiplier.scheduler.milestones = [
-     milestone * 4 for milestone in lr_multiplier.scheduler.milestones
- ]
- lr_multiplier.scheduler.num_updates = train.max_iter
spaces/ChallengeHub/Chinese-LangChain/clc/config.py DELETED
@@ -1,18 +0,0 @@
- #!/usr/bin/env python
- # -*- coding:utf-8 _*-
- """
- @author:quincy qiang
- @license: Apache Licence
- @file: config.py
- @time: 2023/04/17
- @contact: [email protected]
- @software: PyCharm
- @description: coding..
- """
-
-
- class LangChainCFG:
-     llm_model_name = 'THUDM/chatglm-6b-int4-qe'  # local model file, or a remote Hugging Face repo
-     embedding_model_name = 'GanymedeNil/text2vec-large-chinese'  # retrieval model file, or a remote Hugging Face repo
-     vector_store_path = '.'
-     docs_path = './docs'
spaces/ChandraMohanNayal/AutoGPT/autogpt/commands/times.py DELETED
@@ -1,10 +0,0 @@
- from datetime import datetime
-
-
- def get_datetime() -> str:
-     """Return the current date and time
-
-     Returns:
-         str: The current date and time
-     """
-     return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
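The deleted helper only formats `datetime.now()`; a usage sketch, assuming the package layout above:

```python
from autogpt.commands.times import get_datetime  # assumed import path

# Prints e.g. "Current date and time: 2023-04-17 12:00:00" (format only; value varies).
print(get_datetime())
```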
spaces/ChandraMohanNayal/AutoGPT/autogpt/processing/html.py DELETED
@@ -1,33 +0,0 @@
- """HTML processing functions"""
- from __future__ import annotations
-
- from bs4 import BeautifulSoup
- from requests.compat import urljoin
-
-
- def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
-     """Extract hyperlinks from a BeautifulSoup object
-
-     Args:
-         soup (BeautifulSoup): The BeautifulSoup object
-         base_url (str): The base URL
-
-     Returns:
-         List[Tuple[str, str]]: The extracted hyperlinks
-     """
-     return [
-         (link.text, urljoin(base_url, link["href"]))
-         for link in soup.find_all("a", href=True)
-     ]
-
-
- def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
-     """Format hyperlinks to be displayed to the user
-
-     Args:
-         hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
-
-     Returns:
-         List[str]: The formatted hyperlinks
-     """
-     return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
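A usage sketch for the deleted hyperlink helpers; the URLs are illustrative and the import assumes the package layout above:

```python
from bs4 import BeautifulSoup
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks  # assumed import path

html = '<a href="/docs">Docs</a> <a href="https://example.com">Ext</a>'
soup = BeautifulSoup(html, "html.parser")

links = extract_hyperlinks(soup, "https://host.test")
# [('Docs', 'https://host.test/docs'), ('Ext', 'https://example.com')]
print(format_hyperlinks(links))
# ['Docs (https://host.test/docs)', 'Ext (https://example.com)']
```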
spaces/Chintan-Donda/KKMS-KSSW-HF/src/data_loader.py DELETED
@@ -1,230 +0,0 @@
- import os
- import re
- import pandas as pd
- from pathlib import Path
- import glob
-
- from llama_index import GPTVectorStoreIndex, download_loader, SimpleDirectoryReader, SimpleWebPageReader
- from langchain.document_loaders import PyPDFLoader, TextLoader
- from langchain.agents import initialize_agent, Tool
- from langchain.llms import OpenAI
- from langchain.chains.conversation.memory import ConversationBufferMemory
- from langchain.docstore.document import Document
-
- import src.utils as utils
-
- import logging
- logger = logging.getLogger(__name__)
- logging.basicConfig(
-     format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
- )
-
- import warnings
- warnings.filterwarnings('ignore')
-
-
-
- class DATA_LOADER:
-     def __init__(self):
-         # Instantiate UTILS class object
-         self.utils_obj = utils.UTILS()
-
-
-     def load_documents_from_urls(self, urls=[], doc_type='urls'):
-         url_documents = self.load_document(doc_type=doc_type, urls=urls)
-         return url_documents
-
-
-     def load_documents_from_pdf(self, doc_filepath='', urls=[], doc_type='pdf'):
-         if doc_type == 'pdf':
-             pdf_documents = self.load_document(doc_type=doc_type, doc_filepath=doc_filepath)
-         elif doc_type == 'online_pdf':
-             pdf_documents = self.load_document(doc_type=doc_type, urls=urls)
-         return pdf_documents
-
-
-     def load_documents_from_directory(self, doc_filepath='', doc_type='directory'):
-         doc_documents = self.load_document(doc_type=doc_type, doc_filepath=doc_filepath)
-         return doc_documents
-
-
-     def load_documents_from_text(self, doc_filepath='', doc_type='textfile'):
-         text_documents = self.load_document(doc_type=doc_type, doc_filepath=doc_filepath)
-         return text_documents
-
-
-     def pdf_loader(self, filepath):
-         loader = PyPDFLoader(filepath)
-         return loader.load_and_split()
-
-
-     def text_loader(self, filepath):
-         loader = TextLoader(filepath)
-         return loader.load()
-
-
-     def load_document(self,
-                       doc_type='pdf',
-                       doc_filepath='',
-                       urls=[]
-                       ):
-         logger.info(f'Loading {doc_type} in raw format from: {doc_filepath}')
-
-         documents = []
-
-         # Validation checks
-         if doc_type in ['directory', 'pdf', 'textfile']:
-             if not os.path.exists(doc_filepath):
-                 logger.warning(f"{doc_filepath} does not exist, nothing can be loaded!")
-                 return documents
-
-         elif doc_type in ['online_pdf', 'urls']:
-             if len(urls) == 0:
-                 logger.warning(f"URLs list empty, nothing can be loaded!")
-                 return documents
-
-
-         ######### Load documents #########
-         # Load PDF
-         if doc_type == 'pdf':
-             # Load multiple PDFs from directory
-             if os.path.isdir(doc_filepath):
-                 pdfs = glob.glob(f"{doc_filepath}/*.pdf")
-                 logger.info(f'Total PDF files to load: {len(pdfs)}')
-                 for pdf in pdfs:
-                     documents.extend(self.pdf_loader(pdf))
-
-             # Loading from a single PDF file
-             elif os.path.isfile(doc_filepath) and doc_filepath.endswith('.pdf'):
-                 documents.extend(self.pdf_loader(doc_filepath))
-
-         # Load PDFs from online (urls). Can read multiple PDFs from multiple URLs in one-shot
-         elif doc_type == 'online_pdf':
-             logger.info(f'URLs to load Online PDFs are from: {urls}')
-             valid_urls = self.utils_obj.validate_url_format(
-                 urls=urls,
-                 url_type=doc_type
-             )
-             for url in valid_urls:
-                 # Load and split PDF pages per document
-                 documents.extend(self.pdf_loader(url))
-
-         # Load data from URLs (can load data from multiple URLs)
-         elif doc_type == 'urls':
-             logger.info(f'URLs to load data from are: {urls}')
-             valid_urls = self.utils_obj.validate_url_format(
-                 urls=urls,
-                 url_type=doc_type
-             )
-             # Load data from URLs
-             docs = SimpleWebPageReader(html_to_text=True).load_data(valid_urls)
-             docs = [Document(page_content=doc.text) for doc in docs]
-             documents.extend(docs)
-
-         # Load data from text file(s)
-         elif doc_type == 'textfile':
-             # Load multiple text files from directory
-             if os.path.isdir(doc_filepath):
-                 text_files = glob.glob(f"{doc_filepath}/*.txt")
-                 logger.info(f'Total text files to load: {len(text_files)}')
-                 for tf in text_files:
-                     documents.extend(self.text_loader(tf))
-
-             # Loading from a single text file
-             elif os.path.isfile(doc_filepath) and doc_filepath.endswith('.txt'):
-                 documents.extend(self.text_loader(doc_filepath))
-
-         # Load data from files on the local directory (files may be of type .pdf, .txt, .doc, etc.)
-         elif doc_type == 'directory':
-             # Load multiple PDFs from directory
-             if os.path.isdir(doc_filepath):
-                 documents = SimpleDirectoryReader(
-                     input_dir=doc_filepath
-                 ).load_data()
-
-             # Loading from a file
-             elif os.path.isfile(doc_filepath):
-                 documents.extend(SimpleDirectoryReader(
-                     input_files=[doc_filepath]
-                 ).load_data())
-
-         # Load data from URLs in Knowledge Base format
-         elif doc_type == 'url-kb':
-             KnowledgeBaseWebReader = download_loader("KnowledgeBaseWebReader")
-             loader = KnowledgeBaseWebReader()
-             for url in urls:
-                 doc = loader.load_data(
-                     root_url=url,
-                     link_selectors=['.article-list a', '.article-list a'],
-                     article_path='/articles',
-                     body_selector='.article-body',
-                     title_selector='.article-title',
-                     subtitle_selector='.article-subtitle',
-                 )
-                 documents.extend(doc)
-
-         # Load data from URLs and create an agent chain using ChatGPT
-         elif doc_type == 'url-chatgpt':
-             BeautifulSoupWebReader = download_loader("BeautifulSoupWebReader")
-             loader = BeautifulSoupWebReader()
-             # Load data from URLs
-             documents = loader.load_data(urls=urls)
-             # Build the Vector database
-             index = GPTVectorStoreIndex(documents)
-             tools = [
-                 Tool(
-                     name="Website Index",
-                     func=lambda q: index.query(q),
-                     description=f"Useful when you want answer questions about the text retrieved from websites.",
-                 ),
-             ]
-
-             # Call ChatGPT API
-             llm = OpenAI(temperature=0)  # Keep temperature=0 to search from the given urls only
-             memory = ConversationBufferMemory(memory_key="chat_history")
-             agent_chain = initialize_agent(
-                 tools, llm, agent="zero-shot-react-description", memory=memory
-             )
-
-             output = agent_chain.run(input="What language is on this website?")
-
-
-         # Clean documents
-         documents = self.clean_documents(documents)
-         logger.info(f'{doc_type} in raw format from: {doc_filepath} loaded successfully!')
-         return documents
-
-
-     def clean_documents(
-             self,
-             documents
-     ):
-         cleaned_documents = []
-         for document in documents:
-             if hasattr(document, 'page_content'):
-                 document.page_content = self.utils_obj.replace_newlines_and_spaces(document.page_content)
-             elif hasattr(document, 'text'):
-                 document.text = self.utils_obj.replace_newlines_and_spaces(document.text)
-             else:
-                 document = self.utils_obj.replace_newlines_and_spaces(document)
-             cleaned_documents.append(document)
-         return cleaned_documents
-
-
-     def load_external_links_used_by_FTAs(self,
-                                          sheet_filepath='./data/urls_used_by_ftas/external_links_used_by_FTAs.xlsx'
-                                          ):
-         xls = pd.ExcelFile(sheet_filepath)
-         df = pd.DataFrame(columns=['S.No.', 'Link used for', 'Link type', 'Link'])
-         for sheet_name in xls.sheet_names:
-             sheet = pd.read_excel(xls, sheet_name)
-             if sheet.shape[0] > 0:
-                 df = pd.concat([df, sheet])
-             else:
-                 logger.info(f'{sheet_name} has no content.')
-
-         df = df[['Link used for', 'Link type', 'Link']]
-         # Clean df
-         df = self.utils_obj.clean_df(df)
-         logger.info(f'Total links available across all cities: {df.shape[0]}')
-         return df
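A hypothetical driver for the `DATA_LOADER` class deleted above; the path and URL are placeholders, and `src.utils` must be importable for the class to construct:

```python
# Sketch only; file path and URL are placeholders.
from src.data_loader import DATA_LOADER  # assumed import path

loader = DATA_LOADER()
pdf_docs = loader.load_documents_from_pdf(doc_filepath='./docs/report.pdf')
url_docs = loader.load_documents_from_urls(urls=['https://example.com/page'])
print(len(pdf_docs), len(url_docs))
```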
spaces/CikeyQI/meme-api/meme_generator/dirs.py DELETED
@@ -1,225 +0,0 @@
- # https://github.com/nonebot/plugin-localstore
- """
- MIT License
-
- Copyright (c) 2021 NoneBot
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
- """
-
-
- import os
- import sys
- from pathlib import Path
- from typing import Callable, Literal
-
- from typing_extensions import ParamSpec
-
- WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
-
-
- def user_cache_dir(appname: str) -> Path:
-     r"""
-     Return full path to the user-specific cache dir for this application.
-     "appname" is the name of application.
-     Typical user cache directories are:
-         macOS:   ~/Library/Caches/<AppName>
-         Unix:    ~/.cache/<AppName> (XDG default)
-         Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
-     On Windows the only suggestion in the MSDN docs is that local settings go
-     in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
-     non-roaming app data dir (the default returned by `user_data_dir`). Apps
-     typically put cache data somewhere *under* the given dir here. Some
-     examples:
-         ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
-         ...\Acme\SuperApp\Cache\1.0
-     OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
-     """
-     if WINDOWS:
-         return _get_win_folder("CSIDL_LOCAL_APPDATA") / appname / "Cache"
-     elif sys.platform == "darwin":
-         return Path("~/Library/Caches").expanduser() / appname
-     else:
-         return Path(os.getenv("XDG_CACHE_HOME", "~/.cache")).expanduser() / appname
-
-
- def user_data_dir(appname: str, roaming: bool = False) -> Path:
-     r"""
-     Return full path to the user-specific data dir for this application.
-     "appname" is the name of application.
-     If None, just the system directory is returned.
-     "roaming" (boolean, default False) can be set True to use the Windows
-     roaming appdata directory. That means that for users on a Windows
-     network setup for roaming profiles, this user data will be
-     sync'd on login. See
-     <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-     for a discussion of issues.
-     Typical user data directories are:
-         macOS:                ~/Library/Application Support/<AppName>
-         Unix:                 ~/.local/share/<AppName>    # or in
-                               $XDG_DATA_HOME, if defined
-         Win XP (not roaming): C:\Documents and Settings\<username>\ ...
-                               ...Application Data\<AppName>
-         Win XP (roaming):     C:\Documents and Settings\<username>\Local ...
-                               ...Settings\Application Data\<AppName>
-         Win 7 (not roaming):  C:\Users\<username>\AppData\Local\<AppName>
-         Win 7 (roaming):      C:\Users\<username>\AppData\Roaming\<AppName>
-     For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
-     That means, by default "~/.local/share/<AppName>".
-     """
-     if WINDOWS:
-         const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
-         return Path(_get_win_folder(const)) / appname
-     elif sys.platform == "darwin":
-         return Path("~/Library/Application Support/").expanduser() / appname
-     else:
-         return Path(os.getenv("XDG_DATA_HOME", "~/.local/share")).expanduser() / appname
-
-
- def user_config_dir(appname: str, roaming: bool = True) -> Path:
-     """Return full path to the user-specific config dir for this application.
-     "appname" is the name of application.
-     If None, just the system directory is returned.
-     "roaming" (boolean, default True) can be set False to not use the
-     Windows roaming appdata directory. That means that for users on a
-     Windows network setup for roaming profiles, this user data will be
-     sync'd on login. See
-     <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
-     for a discussion of issues.
-     Typical user data directories are:
-         macOS: same as user_data_dir
-         Unix:  ~/.config/<AppName>
-         Win *: same as user_data_dir
-     For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
-     That means, by default "~/.config/<AppName>".
-     """
-     if WINDOWS:
-         return user_data_dir(appname, roaming=roaming)
-     elif sys.platform == "darwin":
-         return user_data_dir(appname)
-     else:
-         return Path(os.getenv("XDG_CONFIG_HOME", "~/.config")).expanduser() / appname
-
-
- # -- Windows support functions --
- def _get_win_folder_from_registry(
-     csidl_name: Literal["CSIDL_APPDATA", "CSIDL_COMMON_APPDATA", "CSIDL_LOCAL_APPDATA"]
- ) -> Path:
-     """
-     This is a fallback technique at best. I'm not sure if using the
-     registry for this guarantees us the correct answer for all CSIDL_*
-     names.
-     """
-     import winreg
-
-     shell_folder_name = {
-         "CSIDL_APPDATA": "AppData",
-         "CSIDL_COMMON_APPDATA": "Common AppData",
-         "CSIDL_LOCAL_APPDATA": "Local AppData",
-     }[csidl_name]
-
-     key = winreg.OpenKey(
-         winreg.HKEY_CURRENT_USER,
-         r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
-     )
-     directory, _type = winreg.QueryValueEx(key, shell_folder_name)
-     return Path(directory)
-
-
- def _get_win_folder_with_ctypes(
-     csidl_name: Literal["CSIDL_APPDATA", "CSIDL_COMMON_APPDATA", "CSIDL_LOCAL_APPDATA"]
- ) -> Path:
-     csidl_const = {
-         "CSIDL_APPDATA": 26,
-         "CSIDL_COMMON_APPDATA": 35,
-         "CSIDL_LOCAL_APPDATA": 28,
-     }[csidl_name]
-
-     buf = ctypes.create_unicode_buffer(1024)
-     ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
-
-     # Downgrade to short path name if have highbit chars. See
-     # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-     has_high_char = any(ord(c) > 255 for c in buf)
-     if has_high_char:
-         buf2 = ctypes.create_unicode_buffer(1024)
-         if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-             buf = buf2
-
-     return Path(buf.value)
-
-
- if WINDOWS:
-     try:
-         import ctypes
-
-         _get_win_folder = _get_win_folder_with_ctypes
-     except ImportError:
-         _get_win_folder = _get_win_folder_from_registry
-
-
- P = ParamSpec("P")
-
- APP_NAME = "meme_generator"
- BASE_CACHE_DIR = user_cache_dir(APP_NAME).resolve()
- BASE_CONFIG_DIR = user_config_dir(APP_NAME).resolve()
- BASE_DATA_DIR = user_data_dir(APP_NAME).resolve()
-
-
- def _ensure_dir(path: Path) -> None:
-     if not path.exists():
-         path.mkdir(parents=True, exist_ok=True)
-     elif not path.is_dir():
-         raise RuntimeError(f"{path} is not a directory")
-
-
- def _auto_create_dir(func: Callable[P, Path]) -> Callable[P, Path]:
-     def wrapper(*args: P.args, **kwargs: P.kwargs) -> Path:
-         path = func(*args, **kwargs)
-         _ensure_dir(path)
-         return path
-
-     return wrapper
-
-
- @_auto_create_dir
- def get_cache_dir() -> Path:
-     return BASE_CACHE_DIR
-
-
- def get_cache_file(filename: str) -> Path:
-     return get_cache_dir() / filename
-
-
- @_auto_create_dir
- def get_config_dir() -> Path:
-     return BASE_CONFIG_DIR
-
-
- def get_config_file(filename: str) -> Path:
-     return get_config_dir() / filename
-
-
- @_auto_create_dir
- def get_data_dir() -> Path:
-     return BASE_DATA_DIR
-
-
- def get_data_file(filename: str) -> Path:
-     return get_data_dir() / filename
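A usage sketch for the deleted path helpers, assuming the module is importable as `meme_generator.dirs`; actual locations follow the per-OS conventions in the docstrings:

```python
from meme_generator.dirs import get_cache_file, get_config_file, get_data_dir  # assumed import path

config = get_config_file("config.toml")  # e.g. ~/.config/meme_generator/config.toml on Linux
cache = get_cache_file("fonts.lock")     # parent dir is auto-created by @_auto_create_dir
print(get_data_dir())                    # per-OS data dir, created on first access
```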
spaces/Clara998/DisneyPixarMovie/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: DisneyPixarMovie
- emoji: 😻
- colorFrom: yellow
- colorTo: purple
- sdk: gradio
- sdk_version: 3.50.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference